The problem, of course, is that the database may block for some time
before any rows are returned, and that during execution, rows may be
returned in blocks of 10 or 100 at a time. Ideally, if the database
blocks for the next set of rows, another user connection could be
serviced. Note the complete absence of SuspendIteration in the above
code. If done correctly, application developers would be able to focus
on functionality rather than concurrency issues.

The iterator created by the above generator should do the magic
necessary to maintain state, yet pass the exception through to a
lower-level async framework. Here is an example of what the
corresponding iterator would look like if coded up as a class:

    class ListAlbums:

        def __init__(self, cursor):
            self.cursor = cursor

        def __iter__(self):
            self.cursor.execute("SELECT title, artist FROM album")
            self._iter = iter(self.cursor)
            self._next = self.state_head
            return self

        def next(self):
            return self._next()

        def state_head(self):
            self._next = self.state_cursor
            return "<html><body><table><tr><td>Title</td><td>Artist</td></tr>"

        def state_tail(self):
            self._next = self.state_stop
            return "</table></body></html>"

        def state_cursor(self):
            try:
                (title, artist) = self._iter.next()
                return '<tr><td>%s</td><td>%s</td></tr>' % (title, artist)
            except StopIteration:
                self._next = self.state_tail
                return self.next()
            except SuspendIteration:
                # just pass-through
                raise

        def state_stop(self):
            raise StopIteration
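The framework side is deliberately left open here; purely as
orientation, a lower-level async framework might drive such an iterator
with a "trampoline" along the following lines. This is a hypothetical
sketch, not part of this proposal: the pump, reschedule, and deliver
names are all illustrative, and SuspendIteration is the exception this
PEP proposes.

    def pump(iterator, reschedule, deliver):
        # Pull a single value from the iterator; if the producer
        # suspends, ask the event loop to try again later.
        try:
            value = iterator.next()
        except StopIteration:
            return                      # iteration is complete
        except SuspendIteration:
            # blocked (e.g. on the database); retry on a later tick
            reschedule(lambda: pump(iterator, reschedule, deliver))
            return
        deliver(value)                  # e.g. write to the client socket
        reschedule(lambda: pump(iterator, reschedule, deliver))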
Complicating Factors

While the above example is straightforward, things are a bit more
complicated if the intermediate generator 'condenses' values, that is,
it pulls in two or more values for each value it produces. For example:

    def pair(iterLeft, iterRight):
        rhs = iter(iterRight)
        lhs = iter(iterLeft)
        while True:
            yield (rhs.next(), lhs.next())

In this case, the corresponding iterator behavior has to be a bit more
subtle to handle the case of either the right or left iterator raising
SuspendIteration. It seems to be a matter of decomposing the generator
to recognize intermediate states where a SuspendIteration exception
from the producing context could happen:

    class pair:

        def __init__(self, iterLeft, iterRight):
            self.iterLeft = iterLeft
            self.iterRight = iterRight

        def __iter__(self):
            self.rhs = iter(self.iterRight)
            self.lhs = iter(self.iterLeft)
            self._temp_rhs = None
            self._temp_lhs = None
            self._next = self.state_rhs
            return self

        def next(self):
            return self._next()

        def state_rhs(self):
            self._temp_rhs = self.rhs.next()
            self._next = self.state_lhs
            return self.next()

        def state_lhs(self):
            self._temp_lhs = self.lhs.next()
            self._next = self.state_pair
            return self.next()

        def state_pair(self):
            self._next = self.state_rhs
            return (self._temp_rhs, self._temp_lhs)

This proposal assumes that a corresponding iterator written using this
class-based method is possible for existing generators. The challenge
seems to be the identification of distinct states within the generator
where suspension could occur.

Resource Cleanup

The current generator mechanism has a strange interaction with
exceptions, in that a 'yield' statement is not allowed within a
try/finally block. The SuspendIteration exception raises a similar
issue. The impacts of this issue are not clear. However, it may be that
rewriting the generator into a state machine, as the previous section
did, could resolve this issue, leaving the situation no worse than it
is today and perhaps even removing the yield/finally restriction. More
investigation is needed in this area.

API and Limitations

This proposal only covers 'suspending' a chain of iterators, and does
not cover (of course) suspending general functions, methods, or "C"
extension functions. While there could be no direct support for
creating generators in "C" code, native "C" iterators which comply with
the SuspendIteration semantics are certainly possible.

Low-Level Implementation

The author of the PEP is not yet familiar enough with the Python
execution model to comment in this area.

Copyright

This document has been placed in the public domain.


PEP: 3333
Title: Python Web Server Gateway Interface v1.0.1
Author: Phillip J. Eby
Discussions-To: web-sig@python.org
Status: Final
Type: Informational
Content-Type: text/x-rst
Created: 26-Sep-2010
Post-History: 26-Sep-2010, 04-Oct-2010
Replaces: 333

Preface for Readers of PEP 333

This is an updated version of PEP 333, modified slightly to improve
usability under Python 3, and to incorporate several long-standing de
facto amendments to the WSGI protocol. (Its code samples have also been
ported to Python 3.)

While for procedural reasons[1], this must be a distinct PEP, no
changes were made that invalidate previously-compliant servers or
applications under Python 2.x. If your 2.x application or server is
compliant to PEP 333, it is also compliant with this PEP.

Under Python 3, however, your app or server must also follow the rules
outlined in the sections below titled A Note On String Types and
Unicode Issues.

For detailed, line-by-line diffs between this document and PEP 333, you
may view its SVN revision history[2], from revision 84854 forward.

Abstract

This document specifies a proposed standard interface between web
servers and Python web applications or frameworks, to promote web
application portability across a variety of web servers.

Original Rationale and Goals (from PEP 333)

Python currently boasts a wide variety of web application frameworks,
such as Zope, Quixote, Webware, SkunkWeb, PSO, and Twisted Web -- to
name just a few[3].
This wide variety of choices can be a problem for\nnew Python users, because generally speaking, their choice of web\nframework will limit their choice of usable web servers, and vice versa.\n\nBy contrast, although Java has just as many web application frameworks\navailable, Java's \"servlet\" API makes it possible for applications\nwritten with any Java web application framework to run in any web server\nthat supports the servlet API.\n\nThe availability and widespread use of such an API in web servers for\nPython -- whether those servers are written in Python (e.g. Medusa),\nembed Python (e.g. mod_python), or invoke Python via a gateway protocol\n(e.g. CGI, FastCGI, etc.) -- would separate choice of framework from\nchoice of web server, freeing users to choose a pairing that suits them,\nwhile freeing framework and server developers to focus on their\npreferred area of specialization.\n\nThis PEP, therefore, proposes a simple and universal interface between\nweb servers and web applications or frameworks: the Python Web Server\nGateway Interface (WSGI).\n\nBut the mere existence of a WSGI spec does nothing to address the\nexisting state of servers and frameworks for Python web applications.\nServer and framework authors and maintainers must actually implement\nWSGI for there to be any effect.\n\nHowever, since no existing servers or frameworks support WSGI, there is\nlittle immediate reward for an author who implements WSGI support. Thus,\nWSGI must be easy to implement, so that an author's initial investment\nin the interface can be reasonably low.\n\nThus, simplicity of implementation on both the server and framework\nsides of the interface is absolutely critical to the utility of the WSGI\ninterface, and is therefore the principal criterion for any design\ndecisions.\n\nNote, however, that simplicity of implementation for a framework author\nis not the same thing as ease of use for a web application author. WSGI\npresents an absolutely \"no frills\" interface to the framework author,\nbecause bells and whistles like response objects and cookie handling\nwould just get in the way of existing frameworks' handling of these\nissues. Again, the goal of WSGI is to facilitate easy interconnection of\nexisting servers and applications or frameworks, not to create a new web\nframework.\n\nNote also that this goal precludes WSGI from requiring anything that is\nnot already available in deployed versions of Python. Therefore, new\nstandard library modules are not proposed or required by this\nspecification, and nothing in WSGI requires a Python version greater\nthan 2.2.2. (It would be a good idea, however, for future versions of\nPython to include support for this interface in web servers provided by\nthe standard library.)\n\nIn addition to ease of implementation for existing and future frameworks\nand servers, it should also be easy to create request preprocessors,\nresponse postprocessors, and other WSGI-based \"middleware\" components\nthat look like an application to their containing server, while acting\nas a server for their contained applications.\n\nIf middleware can be both simple and robust, and WSGI is widely\navailable in servers and frameworks, it allows for the possibility of an\nentirely new kind of Python web application framework: one consisting of\nloosely-coupled WSGI middleware components. 
Indeed, existing framework\nauthors may even choose to refactor their frameworks' existing services\nto be provided in this way, becoming more like libraries used with WSGI,\nand less like monolithic frameworks. This would then allow application\ndevelopers to choose \"best-of-breed\" components for specific\nfunctionality, rather than having to commit to all the pros and cons of\na single framework.\n\nOf course, as of this writing, that day is doubtless quite far off. In\nthe meantime, it is a sufficient short-term goal for WSGI to enable the\nuse of any framework with any server.\n\nFinally, it should be mentioned that the current version of WSGI does\nnot prescribe any particular mechanism for \"deploying\" an application\nfor use with a web server or server gateway. At the present time, this\nis necessarily implementation-defined by the server or gateway. After a\nsufficient number of servers and frameworks have implemented WSGI to\nprovide field experience with varying deployment requirements, it may\nmake sense to create another PEP, describing a deployment standard for\nWSGI servers and application frameworks.\n\nSpecification Overview\n\nThe WSGI interface has two sides: the \"server\" or \"gateway\" side, and\nthe \"application\" or \"framework\" side. The server side invokes a\ncallable object that is provided by the application side. The specifics\nof how that object is provided are up to the server or gateway. It is\nassumed that some servers or gateways will require an application's\ndeployer to write a short script to create an instance of the server or\ngateway, and supply it with the application object. Other servers and\ngateways may use configuration files or other mechanisms to specify\nwhere an application object should be imported from, or otherwise\nobtained.\n\nIn addition to \"pure\" servers/gateways and applications/frameworks, it\nis also possible to create \"middleware\" components that implement both\nsides of this specification. Such components act as an application to\ntheir containing server, and as a server to a contained application, and\ncan be used to provide extended APIs, content transformation,\nnavigation, and other useful functions.\n\nThroughout this specification, we will use the term \"a callable\" to mean\n\"a function, method, class, or an instance with a __call__ method\". It\nis up to the server, gateway, or application implementing the callable\nto choose the appropriate implementation technique for their needs.\nConversely, a server, gateway, or application that is invoking a\ncallable must not have any dependency on what kind of callable was\nprovided to it. Callables are only to be called, not introspected upon.\n\nA Note On String Types\n\nIn general, HTTP deals with bytes, which means that this specification\nis mostly about handling bytes.\n\nHowever, the content of those bytes often has some kind of textual\ninterpretation, and in Python, strings are the most convenient way to\nhandle text.\n\nBut in many Python versions and implementations, strings are Unicode,\nrather than bytes. This requires a careful balance between a usable API\nand correct translations between bytes and text in the context of\nHTTP... 
especially to support porting code between Python\nimplementations with different str types.\n\nWSGI therefore defines two kinds of \"string\":\n\n- \"Native\" strings (which are always implemented using the type named\n str) that are used for request/response headers and metadata\n- \"Bytestrings\" (which are implemented using the bytes type in Python\n 3, and str elsewhere), that are used for the bodies of requests and\n responses (e.g. POST/PUT input data and HTML page outputs).\n\nDo not be confused however: even if Python's str type is actually\nUnicode \"under the hood\", the content of native strings must still be\ntranslatable to bytes via the Latin-1 encoding! (See the section on\nUnicode Issues later in this document for more details.)\n\nIn short: where you see the word \"string\" in this document, it refers to\na \"native\" string, i.e., an object of type str, whether it is internally\nimplemented as bytes or unicode. Where you see references to\n\"bytestring\", this should be read as \"an object of type bytes under\nPython 3, or type str under Python 2\".\n\nAnd so, even though HTTP is in some sense \"really just bytes\", there are\nmany API conveniences to be had by using whatever Python's default str\ntype is.\n\nThe Application/Framework Side\n\nThe application object is simply a callable object that accepts two\narguments. The term \"object\" should not be misconstrued as requiring an\nactual object instance: a function, method, class, or instance with a\n__call__ method are all acceptable for use as an application object.\nApplication objects must be able to be invoked more than once, as\nvirtually all servers/gateways (other than CGI) will make such repeated\nrequests.\n\n(Note: although we refer to it as an \"application\" object, this should\nnot be construed to mean that application developers will use WSGI as a\nweb programming API! It is assumed that application developers will\ncontinue to use existing, high-level framework services to develop their\napplications. WSGI is a tool for framework and server developers, and is\nnot intended to directly support application developers.)\n\nHere are two example application objects; one is a function, and the\nother is a class:\n\n HELLO_WORLD = b\"Hello world!\\n\"\n\n def simple_app(environ, start_response):\n \"\"\"Simplest possible application object\"\"\"\n status = '200 OK'\n response_headers = [('Content-type', 'text/plain')]\n start_response(status, response_headers)\n return [HELLO_WORLD]\n\n class AppClass:\n \"\"\"Produce the same output, but using a class\n\n (Note: 'AppClass' is the \"application\" here, so calling it\n returns an instance of 'AppClass', which is then the iterable\n return value of the \"application callable\" as required by\n the spec.\n\n If we wanted to use *instances* of 'AppClass' as application\n objects instead, we would have to implement a '__call__'\n method, which would be invoked to execute the application,\n and we would need to create an instance for use by the\n server or gateway.\n \"\"\"\n\n def __init__(self, environ, start_response):\n self.environ = environ\n self.start = start_response\n\n def __iter__(self):\n status = '200 OK'\n response_headers = [('Content-type', 'text/plain')]\n self.start(status, response_headers)\n yield HELLO_WORLD\n\nThe Server/Gateway Side\n\nThe server or gateway invokes the application callable once for each\nrequest it receives from an HTTP client, that is directed at the\napplication. 
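Before examining a full gateway, it may help to see the two sides meet.
The following is a deliberately minimal sketch -- not a compliant
server, and not part of this specification -- of one such invocation;
the demo_invoke name and the abbreviated environ are purely
illustrative (a real server must supply all the variables described
under "environ Variables" below):

    import io, sys

    def demo_invoke(application):
        environ = {
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'PATH_INFO': '/',
            'SERVER_NAME': 'localhost',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'http',
            'wsgi.input': io.BytesIO(b''),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': False,
            'wsgi.run_once': True,
        }
        captured = {}

        def start_response(status, response_headers, exc_info=None):
            captured['status'] = status
            captured['headers'] = response_headers
            return lambda data: None   # write() stub; see "The write() Callable"

        result = application(environ, start_response)
        try:
            body = b''.join(result)    # drain the application's iterable
        finally:
            if hasattr(result, 'close'):
                result.close()         # required upon completion; see below

        return captured['status'], captured['headers'], body

Calling demo_invoke(simple_app) would return '200 OK', the single
Content-type header, and the body b"Hello world!\n".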
To illustrate, here is a simple CGI gateway, implemented as\na function taking an application object. Note that this simple example\nhas limited error handling, because by default an uncaught exception\nwill be dumped to sys.stderr and logged by the web server.\n\n import os, sys\n\n enc, esc = sys.getfilesystemencoding(), 'surrogateescape'\n\n def unicode_to_wsgi(u):\n # Convert an environment variable to a WSGI \"bytes-as-unicode\" string\n return u.encode(enc, esc).decode('iso-8859-1')\n\n def wsgi_to_bytes(s):\n return s.encode('iso-8859-1')\n\n def run_with_cgi(application):\n environ = {k: unicode_to_wsgi(v) for k,v in os.environ.items()}\n environ['wsgi.input'] = sys.stdin.buffer\n environ['wsgi.errors'] = sys.stderr\n environ['wsgi.version'] = (1, 0)\n environ['wsgi.multithread'] = False\n environ['wsgi.multiprocess'] = True\n environ['wsgi.run_once'] = True\n\n if environ.get('HTTPS', 'off') in ('on', '1'):\n environ['wsgi.url_scheme'] = 'https'\n else:\n environ['wsgi.url_scheme'] = 'http'\n\n headers_set = []\n headers_sent = []\n\n def write(data):\n out = sys.stdout.buffer\n\n if not headers_set:\n raise AssertionError(\"write() before start_response()\")\n\n elif not headers_sent:\n # Before the first output, send the stored headers\n status, response_headers = headers_sent[:] = headers_set\n out.write(wsgi_to_bytes('Status: %s\\r\\n' % status))\n for header in response_headers:\n out.write(wsgi_to_bytes('%s: %s\\r\\n' % header))\n out.write(wsgi_to_bytes('\\r\\n'))\n\n out.write(data)\n out.flush()\n\n def start_response(status, response_headers, exc_info=None):\n if exc_info:\n try:\n if headers_sent:\n # Re-raise original exception if headers sent\n raise exc_info[1].with_traceback(exc_info[2])\n finally:\n exc_info = None # avoid dangling circular ref\n elif headers_set:\n raise AssertionError(\"Headers already set!\")\n\n headers_set[:] = [status, response_headers]\n\n # Note: error checking on the headers should happen here,\n # *after* the headers are set. That way, if an error\n # occurs, start_response can only be re-called with\n # exc_info set.\n\n return write\n\n result = application(environ, start_response)\n try:\n for data in result:\n if data: # don't send headers until body appears\n write(data)\n if not headers_sent:\n write(b'') # send headers now if body was empty\n finally:\n if hasattr(result, 'close'):\n result.close()\n\nMiddleware: Components that Play Both Sides\n\nNote that a single object may play the role of a server with respect to\nsome application(s), while also acting as an application with respect to\nsome server(s). Such \"middleware\" components can perform such functions\nas:\n\n- Routing a request to different application objects based on the\n target URL, after rewriting the environ accordingly.\n- Allowing multiple applications or frameworks to run side by side in\n the same process\n- Load balancing and remote processing, by forwarding requests and\n responses over a network\n- Perform content postprocessing, such as applying XSL stylesheets\n\nThe presence of middleware in general is transparent to both the\n\"server/gateway\" and the \"application/framework\" sides of the interface,\nand should require no special support. A user who desires to incorporate\nmiddleware into an application simply provides the middleware component\nto the server, as if it were an application, and configures the\nmiddleware component to invoke the application, as if the middleware\ncomponent were a server. 
Of course, the \"application\" that the\nmiddleware wraps may in fact be another middleware component wrapping\nanother application, and so on, creating what is referred to as a\n\"middleware stack\".\n\nFor the most part, middleware must conform to the restrictions and\nrequirements of both the server and application sides of WSGI. In some\ncases, however, requirements for middleware are more stringent than for\na \"pure\" server or application, and these points will be noted in the\nspecification.\n\nHere is a (tongue-in-cheek) example of a middleware component that\nconverts text/plain responses to pig Latin, using Joe Strout's\npiglatin.py. (Note: a \"real\" middleware component would probably use a\nmore robust way of checking the content type, and should also check for\na content encoding. Also, this simple example ignores the possibility\nthat a word might be split across a block boundary.)\n\n from piglatin import piglatin\n\n class LatinIter:\n\n \"\"\"Transform iterated output to piglatin, if it's okay to do so\n\n Note that the \"okayness\" can change until the application yields\n its first non-empty bytestring, so 'transform_ok' has to be a mutable\n truth value.\n \"\"\"\n\n def __init__(self, result, transform_ok):\n if hasattr(result, 'close'):\n self.close = result.close\n self._next = iter(result).__next__\n self.transform_ok = transform_ok\n\n def __iter__(self):\n return self\n\n def __next__(self):\n data = self._next()\n if self.transform_ok:\n return piglatin(data) # call must be byte-safe on Py3\n else:\n return data\n\n class Latinator:\n\n # by default, don't transform output\n transform = False\n\n def __init__(self, application):\n self.application = application\n\n def __call__(self, environ, start_response):\n\n transform_ok = []\n\n def start_latin(status, response_headers, exc_info=None):\n\n # Reset ok flag, in case this is a repeat call\n del transform_ok[:]\n\n for name, value in response_headers:\n if name.lower() == 'content-type' and value == 'text/plain':\n transform_ok.append(True)\n # Strip content-length if present, else it'll be wrong\n response_headers = [(name, value)\n for name, value in response_headers\n if name.lower() != 'content-length'\n ]\n break\n\n write = start_response(status, response_headers, exc_info)\n\n if transform_ok:\n def write_latin(data):\n write(piglatin(data)) # call must be byte-safe on Py3\n return write_latin\n else:\n return write\n\n return LatinIter(self.application(environ, start_latin), transform_ok)\n\n\n # Run foo_app under a Latinator's control, using the example CGI gateway\n from foo_app import foo_app\n run_with_cgi(Latinator(foo_app))\n\nSpecification Details\n\nThe application object must accept two positional arguments. For the\nsake of illustration, we have named them environ and start_response, but\nthey are not required to have these names. A server or gateway must\ninvoke the application object using positional (not keyword) arguments.\n(E.g. by calling result = application(environ, start_response) as shown\nabove.)\n\nThe environ parameter is a dictionary object, containing CGI-style\nenvironment variables. 
This object must be a builtin Python dictionary\n(not a subclass, UserDict or other dictionary emulation), and the\napplication is allowed to modify the dictionary in any way it desires.\nThe dictionary must also include certain WSGI-required variables\n(described in a later section), and may also include server-specific\nextension variables, named according to a convention that will be\ndescribed below.\n\nThe start_response parameter is a callable accepting two required\npositional arguments, and one optional argument. For the sake of\nillustration, we have named these arguments status, response_headers,\nand exc_info, but they are not required to have these names, and the\napplication must invoke the start_response callable using positional\narguments (e.g. start_response(status, response_headers)).\n\nThe status parameter is a status string of the form \"999 Message here\",\nand response_headers is a list of (header_name, header_value) tuples\ndescribing the HTTP response header. The optional exc_info parameter is\ndescribed below in the sections on The start_response() Callable and\nError Handling. It is used only when the application has trapped an\nerror and is attempting to display an error message to the browser.\n\nThe start_response callable must return a write(body_data) callable that\ntakes one positional parameter: a bytestring to be written as part of\nthe HTTP response body. (Note: the write() callable is provided only to\nsupport certain existing frameworks' imperative output APIs; it should\nnot be used by new applications or frameworks if it can be avoided. See\nthe Buffering and Streaming section for more details.)\n\nWhen called by the server, the application object must return an\niterable yielding zero or more bytestrings. This can be accomplished in\na variety of ways, such as by returning a list of bytestrings, or by the\napplication being a generator function that yields bytestrings, or by\nthe application being a class whose instances are iterable. Regardless\nof how it is accomplished, the application object must always return an\niterable yielding zero or more bytestrings.\n\nThe server or gateway must transmit the yielded bytestrings to the\nclient in an unbuffered fashion, completing the transmission of each\nbytestring before requesting another one. (In other words, applications\nshould perform their own buffering. See the Buffering and Streaming\nsection below for more on how application output must be handled.)\n\nThe server or gateway should treat the yielded bytestrings as binary\nbyte sequences: in particular, it should ensure that line endings are\nnot altered. The application is responsible for ensuring that the\nbytestring(s) to be written are in a format suitable for the client.\n(The server or gateway may apply HTTP transfer encodings, or perform\nother transformations for the purpose of implementing HTTP features such\nas byte-range transmission. See Other HTTP Features, below, for more\ndetails.)\n\nIf a call to len(iterable) succeeds, the server must be able to rely on\nthe result being accurate. That is, if the iterable returned by the\napplication provides a working __len__() method, it must return an\naccurate result. 
(See the Handling the Content-Length Header section for\ninformation on how this would normally be used.)\n\nIf the iterable returned by the application has a close() method, the\nserver or gateway must call that method upon completion of the current\nrequest, whether the request was completed normally, or terminated early\ndue to an application error during iteration or an early disconnect of\nthe browser. (The close() method requirement is to support resource\nrelease by the application. This protocol is intended to complement PEP\n342's generator support, and other common iterables with close()\nmethods.)\n\nApplications returning a generator or other custom iterator should not\nassume the entire iterator will be consumed, as it may be closed early\nby the server.\n\n(Note: the application must invoke the start_response() callable before\nthe iterable yields its first body bytestring, so that the server can\nsend the headers before any body content. However, this invocation may\nbe performed by the iterable's first iteration, so servers must not\nassume that start_response() has been called before they begin iterating\nover the iterable.)\n\nFinally, servers and gateways must not directly use any other attributes\nof the iterable returned by the application, unless it is an instance of\na type specific to that server or gateway, such as a \"file wrapper\"\nreturned by wsgi.file_wrapper (see Optional Platform-Specific File\nHandling). In the general case, only attributes specified here, or\naccessed via e.g. the PEP 234 iteration APIs are acceptable.\n\nenviron Variables\n\nThe environ dictionary is required to contain these CGI environment\nvariables, as defined by the Common Gateway Interface specification[4].\nThe following variables must be present, unless their value would be an\nempty string, in which case they may be omitted, except as otherwise\nnoted below.\n\nREQUEST_METHOD\n\n The HTTP request method, such as \"GET\" or \"POST\". This cannot ever\n be an empty string, and so is always required.\n\nSCRIPT_NAME\n\n The initial portion of the request URL's \"path\" that corresponds to\n the application object, so that the application knows its virtual\n \"location\". This may be an empty string, if the application\n corresponds to the \"root\" of the server.\n\nPATH_INFO\n\n The remainder of the request URL's \"path\", designating the virtual\n \"location\" of the request's target within the application. This may\n be an empty string, if the request URL targets the application root\n and does not have a trailing slash.\n\nQUERY_STRING\n\n The portion of the request URL that follows the \"?\", if any. May be\n empty or absent.\n\nCONTENT_TYPE\n\n The contents of any Content-Type fields in the HTTP request. May be\n empty or absent.\n\nCONTENT_LENGTH\n\n The contents of any Content-Length fields in the HTTP request. May\n be empty or absent.\n\nSERVER_NAME, SERVER_PORT\n\n When HTTP_HOST is not set, these variables can be combined to\n determine a default. See the URL Reconstruction section below for\n more detail. SERVER_NAME and SERVER_PORT are required strings and\n must never be empty.\n\nSERVER_PROTOCOL\n\n The version of the protocol the client used to send the request.\n Typically this will be something like \"HTTP/1.0\" or \"HTTP/1.1\" and\n may be used by the application to determine how to treat any HTTP\n request headers. 
    (This variable should probably be called REQUEST_PROTOCOL, since
    it denotes the protocol used in the request, and is not
    necessarily the protocol that will be used in the server's
    response. However, for compatibility with CGI we have to keep the
    existing name.)

HTTP_ Variables

    Variables corresponding to the client-supplied HTTP request
    headers (i.e., variables whose names begin with "HTTP_"). The
    presence or absence of these variables should correspond with the
    presence or absence of the appropriate HTTP header in the request.

A server or gateway should attempt to provide as many other CGI
variables as are applicable. In addition, if SSL is in use, the server
or gateway should also provide as many of the Apache SSL environment
variables[5] as are applicable, such as HTTPS=on and SSL_PROTOCOL.
Note, however, that an application that uses any CGI variables other
than the ones listed above is necessarily non-portable to web servers
that do not support the relevant extensions. (For example, web servers
that do not publish files will not be able to provide a meaningful
DOCUMENT_ROOT or PATH_TRANSLATED.)

A WSGI-compliant server or gateway should document what variables it
provides, along with their definitions as appropriate. Applications
should check for the presence of any variables they require, and have
a fallback plan in the event such a variable is absent.

Note: missing variables (such as REMOTE_USER when no authentication
has occurred) should be left out of the environ dictionary. Also note
that CGI-defined variables must be native strings, if they are present
at all. It is a violation of this specification for any CGI variable's
value to be of any type other than str.
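As a purely illustrative (non-normative) example, an application might
consult these variables defensively as follows; the path_echo_app name
is hypothetical, and the charset choice is the application's own:

    def path_echo_app(environ, start_response):
        method = environ['REQUEST_METHOD']       # always present
        # Variables whose value would be empty may be omitted
        # entirely, so use .get() with a default for the optional ones:
        path = environ.get('PATH_INFO', '')
        query = environ.get('QUERY_STRING', '')
        body = ('%s %s?%s' % (method, path, query)).encode('utf-8')
        start_response('200 OK',
                       [('Content-Type', 'text/plain; charset=utf-8')])
        return [body]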
In addition to the CGI-defined variables, the environ dictionary may
also contain arbitrary operating-system "environment variables", and
must contain the following WSGI-defined variables:

wsgi.version

    The tuple (1, 0), representing WSGI version 1.0.

wsgi.url_scheme

    A string representing the "scheme" portion of the URL at which the
    application is being invoked. Normally, this will have the value
    "http" or "https", as appropriate.

wsgi.input

    An input stream (file-like object) from which the HTTP request
    body bytes can be read. (The server or gateway may perform reads
    on-demand as requested by the application, or it may pre-read the
    client's request body and buffer it in-memory or on disk, or use
    any other technique for providing such an input stream, according
    to its preference.)

wsgi.errors

    An output stream (file-like object) to which error output can be
    written, for the purpose of recording program or other errors in a
    standardized and possibly centralized location. This should be a
    "text mode" stream; i.e., applications should use "\n" as a line
    ending, and assume that it will be converted to the correct line
    ending by the server/gateway.

    (On platforms where the str type is unicode, the error stream
    should accept and log arbitrary unicode without raising an error;
    it is allowed, however, to substitute characters that cannot be
    rendered in the stream's encoding.)

    For many servers, wsgi.errors will be the server's main error log.
    Alternatively, this may be sys.stderr, or a log file of some sort.
    The server's documentation should include an explanation of how to
    configure this or where to find the recorded output. A server or
    gateway may supply different error streams to different
    applications, if this is desired.

wsgi.multithread

    This value should evaluate true if the application object may be
    simultaneously invoked by another thread in the same process, and
    should evaluate false otherwise.

wsgi.multiprocess

    This value should evaluate true if an equivalent application
    object may be simultaneously invoked by another process, and
    should evaluate false otherwise.

wsgi.run_once

    This value should evaluate true if the server or gateway expects
    (but does not guarantee!) that the application will only be
    invoked this one time during the life of its containing process.
    Normally, this will only be true for a gateway based on CGI (or
    something similar).

Finally, the environ dictionary may also contain server-defined
variables. These variables should be named using only lower-case
letters, numbers, dots, and underscores, and should be prefixed with a
name that is unique to the defining server or gateway. For example,
mod_python might define variables with names like
mod_python.some_variable.

Input and Error Streams

The input and error streams provided by the server must support the
following methods:

    Method             Stream    Notes
    -----------------  --------  -------
    read(size)         input     1
    readline()         input     1, 2
    readlines(hint)    input     1, 3
    __iter__()         input
    flush()            errors    4
    write(str)         errors
    writelines(seq)    errors

The semantics of each method are as documented in the Python Library
Reference, except for these notes as listed in the table above:

1.  The server is not required to read past the client's specified
    Content-Length, and should simulate an end-of-file condition if
    the application attempts to read past that point. The application
    should not attempt to read more data than is specified by the
    CONTENT_LENGTH variable.

    A server should allow read() to be called without an argument, and
    return the remainder of the client's input stream.

    A server should return empty bytestrings from any attempt to read
    from an empty or exhausted input stream.

2.  Servers should support the optional "size" argument to readline(),
    but as in WSGI 1.0, they are allowed to omit support for it.

    (In WSGI 1.0, the size argument was not supported, on the grounds
    that it might have been complex to implement, and was not often
    used in practice... but then the cgi module started using it, and
    so practical servers had to start supporting it anyway!)
3.  Note that the hint argument to readlines() is optional for both
    caller and implementer. The application is free not to supply it,
    and the server or gateway is free to ignore it.

4.  Since the errors stream may not be rewound, servers and gateways
    are free to forward write operations immediately, without
    buffering. In this case, the flush() method may be a no-op.
    Portable applications, however, cannot assume that output is
    unbuffered or that flush() is a no-op. They must call flush() if
    they need to ensure that output has in fact been written. (For
    example, to minimize intermingling of data from multiple processes
    writing to the same error log.)

The methods listed in the table above must be supported by all servers
conforming to this specification. Applications conforming to this
specification must not use any other methods or attributes of the
input or errors objects. In particular, applications must not attempt
to close these streams, even if they possess close() methods.
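To illustrate note 1 above, here is a non-normative sketch of how an
application might read a request body without over-reading; the
read_request_body helper name is purely illustrative:

    def read_request_body(environ):
        # Read at most CONTENT_LENGTH bytes, per note 1 above; an
        # absent or empty CONTENT_LENGTH is treated as "no body".
        try:
            length = int(environ.get('CONTENT_LENGTH') or 0)
        except ValueError:
            length = 0
        if length <= 0:
            return b''
        return environ['wsgi.input'].read(length)   # a bytestring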
The start_response() Callable

The second parameter passed to the application object is a callable of
the form start_response(status, response_headers, exc_info=None). (As
with all WSGI callables, the arguments must be supplied positionally,
not by keyword.) The start_response callable is used to begin the HTTP
response, and it must return a write(body_data) callable (see the
Buffering and Streaming section, below).

The status argument is an HTTP "status" string like "200 OK" or
"404 Not Found". That is, it is a string consisting of a Status-Code
and a Reason-Phrase, in that order and separated by a single space,
with no surrounding whitespace or other characters. (See RFC 2616,
Section 6.1.1 for more information.) The string must not contain
control characters, and must not be terminated with a carriage return,
linefeed, or combination thereof.

The response_headers argument is a list of (header_name, header_value)
tuples. It must be a Python list; i.e.
type(response_headers) is ListType, and the server may change its
contents in any way it desires. Each header_name must be a valid HTTP
header field-name (as defined by RFC 2616, Section 4.2), without a
trailing colon or other punctuation.

Each header_value must not include any control characters, including
carriage returns or linefeeds, either embedded or at the end. (These
requirements are to minimize the complexity of any parsing that must
be performed by servers, gateways, and intermediate response
processors that need to inspect or modify response headers.)

In general, the server or gateway is responsible for ensuring that
correct headers are sent to the client: if the application omits a
header required by HTTP (or other relevant specifications that are in
effect), the server or gateway must add it. For example, the HTTP
Date: and Server: headers would normally be supplied by the server or
gateway.

(A reminder for server/gateway authors: HTTP header names are
case-insensitive, so be sure to take that into consideration when
examining application-supplied headers!)

Applications and middleware are forbidden from using HTTP/1.1
"hop-by-hop" features or headers, any equivalent features in HTTP/1.0,
or any headers that would affect the persistence of the client's
connection to the web server. These features are the exclusive
province of the actual web server, and a server or gateway should
consider it a fatal error for an application to attempt sending them,
and raise an error if they are supplied to start_response(). (For more
specifics on "hop-by-hop" features and headers, please see the Other
HTTP Features section below.)

Servers should check for errors in the headers at the time
start_response is called, so that an error can be raised while the
application is still running.

However, the start_response callable must not actually transmit the
response headers. Instead, it must store them for the server or
gateway to transmit only after the first iteration of the application
return value that yields a non-empty bytestring, or upon the
application's first invocation of the write() callable. In other
words, response headers must not be sent until there is actual body
data available, or until the application's returned iterable is
exhausted. (The only possible exception to this rule is if the
response headers explicitly include a Content-Length of zero.)

This delaying of response header transmission is to ensure that
buffered and asynchronous applications can replace their originally
intended output with error output, up until the last possible moment.
For example, the application may need to change the response status
from "200 OK" to "500 Internal Error", if an error occurs while the
body is being generated within an application buffer.

The exc_info argument, if supplied, must be a Python sys.exc_info()
tuple. This argument should be supplied by the application only if
start_response is being called by an error handler. If exc_info is
supplied, and no HTTP headers have been output yet, start_response
should replace the currently-stored HTTP response headers with the
newly-supplied ones, thus allowing the application to "change its
mind" about the output when an error has occurred.

However, if exc_info is provided, and the HTTP headers have already
been sent, start_response must raise an error, and should re-raise
using the exc_info tuple. That is:

    raise exc_info[1].with_traceback(exc_info[2])

This will re-raise the exception trapped by the application, and in
principle should abort the application. (It is not safe for the
application to attempt error output to the browser once the HTTP
headers have already been sent.) The application must not trap any
exceptions raised by start_response, if it called start_response with
exc_info. Instead, it should allow such exceptions to propagate back
to the server or gateway. See Error Handling below, for more details.

The application may call start_response more than once, if and only if
the exc_info argument is provided. More precisely, it is a fatal error
to call start_response without the exc_info argument if start_response
has already been called within the current invocation of the
application. This includes the case where the first call to
start_response raised an error. (See the example CGI gateway above for
an illustration of the correct logic.)

Note: servers, gateways, or middleware implementing start_response
should ensure that no reference is held to the exc_info parameter
beyond the duration of the function's execution, to avoid creating a
circular reference through the traceback and frames involved.
The simplest way to do this is something like:

    def start_response(status, response_headers, exc_info=None):
        if exc_info:
            try:
                pass        # do stuff w/exc_info here
            finally:
                exc_info = None    # Avoid circular ref.

The example CGI gateway provides another illustration of this
technique.

Handling the Content-Length Header

If the application supplies a Content-Length header, the server should
not transmit more bytes to the client than the header allows, and
should stop iterating over the response when enough data has been
sent, or raise an error if the application tries to write() past that
point. (Of course, if the application does not provide enough data to
meet its stated Content-Length, the server should close the connection
and log or otherwise report the error.)

If the application does not supply a Content-Length header, a server
or gateway may choose one of several approaches to handling it. The
simplest of these is to close the client connection when the response
is completed.

Under some circumstances, however, the server or gateway may be able
to either generate a Content-Length header, or at least avoid the need
to close the client connection. If the application does not call the
write() callable, and returns an iterable whose len() is 1, then the
server can automatically determine Content-Length by taking the length
of the first bytestring yielded by the iterable.

And, if the server and client both support HTTP/1.1 "chunked encoding"
(RFC 2616, Section 3.6.1), then the server may use chunked encoding to
send a chunk for each write() call or bytestring yielded by the
iterable, thus generating a Content-Length header for each chunk. This
allows the server to keep the client connection alive, if it wishes to
do so. Note that the server must comply fully with RFC 2616 when doing
this, or else fall back to one of the other strategies for dealing
with the absence of Content-Length.

(Note: applications and middleware must not apply any kind of
Transfer-Encoding to their output, such as chunking or gzipping; as
"hop-by-hop" operations, these encodings are the province of the
actual web server/gateway. See Other HTTP Features below, for more
details.)
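As a non-normative illustration of the len()-of-one rule above, a
server might decide whether it can supply the header like this; the
maybe_content_length helper is hypothetical:

    def maybe_content_length(result, response_headers):
        # 'result' is the application's return value; the caller is
        # still responsible for close() handling as described above.
        supplied = any(name.lower() == 'content-length'
                       for name, value in response_headers)
        try:
            one_item = (len(result) == 1)    # working __len__() only
        except TypeError:
            one_item = False                 # length unknown
        if not supplied and one_item:
            body = list(result)              # exactly one bytestring
            response_headers.append(
                ('Content-Length', str(len(body[0]))))
            return body, response_headers
        return result, response_headers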
Buffering and Streaming

Generally speaking, applications will achieve the best throughput by
buffering their (modestly-sized) output and sending it all at once.
This is a common approach in existing frameworks such as Zope: the
output is buffered in a StringIO or similar object, then transmitted
all at once, along with the response headers.

The corresponding approach in WSGI is for the application to simply
return a single-element iterable (such as a list) containing the
response body as a single bytestring. This is the recommended approach
for the vast majority of application functions, that render HTML pages
whose text easily fits in memory.

For large files, however, or for specialized uses of HTTP streaming
(such as multipart "server push"), an application may need to provide
output in smaller blocks (e.g. to avoid loading a large file into
memory). It's also sometimes the case that part of a response may be
time-consuming to produce, but it would be useful to send ahead the
portion of the response that precedes it.

In these cases, applications will usually return an iterator (often a
generator-iterator) that produces the output in a block-by-block
fashion. These blocks may be broken to coincide with multipart
boundaries (for "server push"), or just before time-consuming tasks
(such as reading another block of an on-disk file).

WSGI servers, gateways, and middleware must not delay the transmission
of any block; they must either fully transmit the block to the client,
or guarantee that they will continue transmission even while the
application is producing its next block. A server/gateway or
middleware may provide this guarantee in one of three ways:

1. Send the entire block to the operating system (and request that
   any O/S buffers be flushed) before returning control to the
   application, OR
2. Use a different thread to ensure that the block continues to be
   transmitted while the application produces the next block.
3. (Middleware only) send the entire block to its parent
   gateway/server

By providing this guarantee, WSGI allows applications to ensure that
transmission will not become stalled at an arbitrary point in their
output data. This is critical for proper functioning of e.g. multipart
"server push" streaming, where data between multipart boundaries
should be transmitted in full to the client.

Middleware Handling of Block Boundaries

In order to better support asynchronous applications and servers,
middleware components must not block iteration waiting for multiple
values from an application iterable. If the middleware needs to
accumulate more data from the application before it can produce any
output, it must yield an empty bytestring, as sketched in the example
that follows this section.

To put this requirement another way, a middleware component must yield
at least one value each time its underlying application yields a
value. If the middleware cannot yield any other value, it must yield
an empty bytestring.

This requirement ensures that asynchronous applications and servers
can conspire to reduce the number of threads that are required to run
a given number of application instances simultaneously.

Note also that this requirement means that middleware must return an
iterable as soon as its underlying application returns an iterable. It
is also forbidden for middleware to use the write() callable to
transmit data that is yielded by an underlying application. Middleware
may only use their parent server's write() callable to transmit data
that the underlying application sent using a middleware-provided
write() callable.
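Here is a non-normative sketch of an accumulating middleware iterable
that obeys this rule; the Accumulator name and the threshold are
illustrative only:

    class Accumulator:

        """Buffer blocks up to a threshold, but still yield exactly
        once (possibly an empty bytestring) per application yield."""

        def __init__(self, result, threshold=8192):
            self._iter = iter(result)
            self._buffer = []
            self._size = 0
            self._threshold = threshold
            if hasattr(result, 'close'):
                self.close = result.close   # propagate close(), per spec

        def __iter__(self):
            return self

        def __next__(self):
            try:
                data = next(self._iter)
            except StopIteration:
                if self._buffer:             # flush whatever remains
                    chunk = b''.join(self._buffer)
                    del self._buffer[:]
                    return chunk
                raise
            self._buffer.append(data)
            self._size += len(data)
            if self._size < self._threshold:
                return b''                   # don't block the server
            chunk = b''.join(self._buffer)
            del self._buffer[:]
            self._size = 0
            return chunk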
The write() Callable

Some existing application framework APIs support unbuffered output in
a different manner than WSGI. Specifically, they provide a "write"
function or method of some kind to write an unbuffered block of data,
or else they provide a buffered "write" function and a "flush"
mechanism to flush the buffer.

Unfortunately, such APIs cannot be implemented in terms of WSGI's
"iterable" application return value, unless threads or other special
mechanisms are used.

Therefore, to allow these frameworks to continue using an imperative
API, WSGI includes a special write() callable, returned by the
start_response callable.

New WSGI applications and frameworks should not use the write()
callable if it is possible to avoid doing so. The write() callable is
strictly a hack to support imperative streaming APIs. In general,
applications should produce their output via their returned iterable,
as this makes it possible for web servers to interleave other tasks in
the same Python thread, potentially providing better throughput for
the server as a whole.

The write() callable is returned by the start_response() callable, and
it accepts a single parameter: a bytestring to be written as part of
the HTTP response body, which is treated exactly as though it had been
yielded by the output iterable. In other words, before write()
returns, it must guarantee that the passed-in bytestring was either
completely sent to the client, or that it is buffered for transmission
while the application proceeds onward.

An application must return an iterable object, even if it uses write()
to produce all or part of its response body. The returned iterable may
be empty (i.e. yield no non-empty bytestrings), but if it does yield
non-empty bytestrings, that output must be treated normally by the
server or gateway (i.e., it must be sent or queued immediately).
Applications must not invoke write() from within their return
iterable, and therefore any bytestrings yielded by the iterable are
transmitted after all bytestrings passed to write() have been sent to
the client.
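For example, a framework ported from such an imperative API might
produce applications shaped like this sketch (names hypothetical):

    def legacy_style_app(environ, start_response):
        write = start_response("200 OK",
                               [("Content-Type", "text/plain")])
        write(b"Hello, ")      # each block is pushed imperatively
        write(b"world!\n")
        return []              # an iterable must still be returned

The returned list is empty here because the entire body was pushed
through write(); per the rules above, anything it did yield would be
transmitted only after the write() output.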
Unicode Issues

HTTP does not directly support Unicode, and neither does this
interface. All encoding/decoding must be handled by the application;
all strings passed to or from the server must be of type str or bytes,
never unicode. The result of using a unicode object where a string
object is required is undefined.

Note also that strings passed to start_response() as a status or as
response headers must follow RFC 2616 with respect to encoding. That
is, they must either be ISO-8859-1 characters, or use RFC 2047 MIME
encoding.

On Python platforms where the str or StringType type is in fact
Unicode-based (e.g. Jython, IronPython, Python 3, etc.), all "strings"
referred to in this specification must contain only code points
representable in ISO-8859-1 encoding (\u0000 through \u00FF,
inclusive). It is a fatal error for an application to supply strings
containing any other Unicode character or code point. Similarly,
servers and gateways must not supply strings to an application
containing any other Unicode characters.

Again, all objects referred to in this specification as "strings" must
be of type str or StringType, and must not be of type unicode or
UnicodeType. And, even if a given platform allows for more than 8 bits
per character in str/StringType objects, only the lower 8 bits may be
used, for any value referred to in this specification as a "string".

For values referred to in this specification as "bytestrings" (i.e.,
values read from wsgi.input, passed to write() or yielded by the
application), the value must be of type bytes under Python 3, and str
in earlier versions of Python.

Error Handling

In general, applications should try to trap their own, internal
errors, and display a helpful message in the browser. (It is up to the
application to decide what "helpful" means in this context.)

However, to display such a message, the application must not have
actually sent any data to the browser yet, or else it risks corrupting
the response. WSGI therefore provides a mechanism to either allow the
application to send its error message, or be automatically aborted:
the exc_info argument to start_response. Here is an example of its
use:

    try:
        # regular application code here
        status = "200 Froody"
        response_headers = [("content-type", "text/plain")]
        start_response(status, response_headers)
        return ["normal body goes here"]
    except:
        # XXX should trap runtime issues like MemoryError,
        #     KeyboardInterrupt in a separate handler before this
        #     bare 'except:'...
        status = "500 Oops"
        response_headers = [("content-type", "text/plain")]
        start_response(status, response_headers, sys.exc_info())
        return ["error body goes here"]

If no output has been written when an exception occurs, the call to
start_response will return normally, and the application will return
an error body to be sent to the browser. However, if any output has
already been sent to the browser, start_response will reraise the
provided exception. This exception should not be trapped by the
application, and so the application will abort. The server or gateway
can then trap this (fatal) exception and abort the response.

Servers should trap and log any exception that aborts an application
or the iteration of its return value. If a partial response has
already been written to the browser when an application error occurs,
the server or gateway may attempt to add an error message to the
output, if the already-sent headers indicate a text/* content type
that the server knows how to modify cleanly.

Some middleware may wish to provide additional exception handling
services, or intercept and replace application error messages. In such
cases, middleware may choose to not re-raise the exc_info supplied to
start_response, but instead raise a middleware-specific exception, or
simply return without an exception after storing the supplied
arguments. This will then cause the application to return its error
body iterable (or invoke write()), allowing the middleware to capture
and modify the error output. These techniques will work as long as
application authors:

1. Always provide exc_info when beginning an error response
2. Never trap errors raised by start_response when exc_info is being
   provided
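One possible shape for such middleware is sketched below (the
state-capturing dictionary and the replacement body are invented for
illustration; this is not the only valid arrangement):

    def friendly_errors_middleware(app):
        def wrapper(environ, start_response):
            captured = {}
            def my_start_response(status, headers, exc_info=None):
                if exc_info is not None:
                    # Store the arguments instead of re-raising, so
                    # the application returns its error body normally.
                    captured['args'] = (status, headers, exc_info)
                    return lambda data: None   # swallow write() data
                return start_response(status, headers)
            result = app(environ, my_start_response)
            if 'args' in captured:
                status, headers, exc_info = captured['args']
                # Now begin the response ourselves, substituting our
                # own error page for the application's.
                start_response(status, headers, exc_info)
                return [b"<html>A friendlier error page.</html>"]
            return result
        return wrapper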
HTTP 1.1 Expect/Continue

Servers and gateways that implement HTTP 1.1 must provide transparent
support for HTTP 1.1's "expect/continue" mechanism. This may be done
in any of several ways:

1. Respond to requests containing an Expect: 100-continue header with
   an immediate "100 Continue" response, and proceed normally.
2. Proceed with the request normally, but provide the application with
   a wsgi.input stream that will send the "100 Continue" response
   if/when the application first attempts to read from the input
   stream. The read request must then remain blocked until the client
   responds.
3. Wait until the client decides that the server does not support
   expect/continue, and sends the request body on its own. (This is
   suboptimal, and is not recommended.)

Note that these behavior restrictions do not apply for HTTP 1.0
requests, or for requests that are not directed to an application
object. For more information on HTTP 1.1 Expect/Continue, see RFC
2616, sections 8.2.3 and 10.1.1.

Other HTTP Features

In general, servers and gateways should "play dumb" and allow the
application complete control over its output. They should only make
changes that do not alter the effective semantics of the application's
response. It is always possible for the application developer to add
middleware components to supply additional features, so server/gateway
developers should be conservative in their implementation. In a sense,
a server should consider itself to be like an HTTP "gateway server",
with the application being an HTTP "origin server". (See RFC 2616,
section 1.3, for the definition of these terms.)

However, because WSGI servers and applications do not communicate via
HTTP, what RFC 2616 calls "hop-by-hop" headers do not apply to WSGI
internal communications. WSGI applications must not generate any
"hop-by-hop" headers (RFC 2616, section 13.5.1), attempt to use HTTP
features that would require them to generate such headers, or rely on
the content of any incoming "hop-by-hop" headers in the environ
dictionary. WSGI servers must handle any supported inbound
"hop-by-hop" headers on their own, such as by decoding any inbound
Transfer-Encoding, including chunked encoding if applicable.

Applying these principles to a variety of HTTP features, it should be
clear that a server may handle cache validation via the If-None-Match
and If-Modified-Since request headers and the Last-Modified and ETag
response headers. However, the server is not required to do so; an
application that wants to support that feature should therefore
perform its own cache validation.
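An application that wants conditional-GET support might therefore do
something like the following sketch (the fixed ETag value is a
placeholder for whatever versioning scheme the application uses):

    def cached_resource_app(environ, start_response):
        etag = '"v1"'
        if environ.get('HTTP_IF_NONE_MATCH') == etag:
            start_response("304 Not Modified", [("ETag", etag)])
            return [b""]
        start_response("200 OK", [("Content-Type", "text/plain"),
                                  ("ETag", etag)])
        return [b"the resource body\n"]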
Similarly, a server may re-encode or transport-encode an application's
response, but the application should use a suitable content encoding
on its own, and must not apply a transport encoding. A server may
transmit byte ranges of the application's response if requested by the
client, and the application doesn't natively support byte ranges.
Again, however, the application should perform this function on its
own if desired.

Note that these restrictions on applications do not necessarily mean
that every application must reimplement every HTTP feature; many HTTP
features can be partially or fully implemented by middleware
components, thus freeing both server and application authors from
implementing the same features over and over again.

Thread Support

Thread support, or lack thereof, is also server-dependent. Servers
that can run multiple requests in parallel should also provide the
option of running an application in a single-threaded fashion, so that
applications or frameworks that are not thread-safe may still be used
with that server.

Implementation/Application Notes

Server Extension APIs

Some server authors may wish to expose more advanced APIs that
application or framework authors can use for specialized purposes. For
example, a gateway based on mod_python might wish to expose part of
the Apache API as a WSGI extension.

In the simplest case, this requires nothing more than defining an
environ variable, such as mod_python.some_api. But, in many cases, the
possible presence of middleware can make this difficult. For example,
an API that offers access to the same HTTP headers that are found in
environ variables might return different data if environ has been
modified by middleware.

In general, any extension API that duplicates, supplants, or bypasses
some portion of WSGI functionality runs the risk of being incompatible
with middleware components. Server/gateway developers should not
assume that nobody will use middleware, because some framework
developers specifically intend to organize or reorganize their
frameworks to function almost entirely as middleware of various kinds.

So, to provide maximum compatibility, servers and gateways that
provide extension APIs that replace some WSGI functionality must
design those APIs so that they are invoked using the portion of the
API that they replace. For example, an extension API to access HTTP
request headers must require the application to pass in its current
environ, so that the server/gateway may verify that HTTP headers
accessible via the API have not been altered by middleware. If the
extension API cannot guarantee that it will always agree with environ
about the contents of HTTP headers, it must refuse service to the
application, e.g. by raising an error, returning None instead of a
header collection, or whatever is appropriate to the API.

Similarly, if an extension API provides an alternate means of writing
response data or headers, it should require the start_response
callable to be passed in, before the application can obtain the
extended service. If the object passed in is not the same one that the
server/gateway originally supplied to the application, it cannot
guarantee correct operation and must refuse to provide the extended
service to the application.

These guidelines also apply to middleware that adds information such
as parsed cookies, form variables, sessions, and the like to environ.
Specifically, such middleware should provide these features as
functions which operate on environ, rather than simply stuffing values
into environ. This helps ensure that information is calculated from
environ after any middleware has done any URL rewrites or other
environ modifications.

It is very important that these "safe extension" rules be followed by
both server/gateway and middleware developers, in order to avoid a
future in which middleware developers are forced to delete any and all
extension APIs from environ to ensure that their mediation isn't being
bypassed by applications using those extensions!
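The header-access example might look like the following sketch (all
names are hypothetical; a real server would compare environ against
its own private record of the request):

    _private_headers = {}   # stands in for the server's own copy
                            # of the inbound HTTP request headers

    def get_raw_headers(environ):
        # Hypothetical server extension API: the caller must pass in
        # its current environ so alterations can be detected.
        for name, value in _private_headers.items():
            cgi_name = 'HTTP_' + name.upper().replace('-', '_')
            if environ.get(cgi_name) != value:
                return None   # refuse service: environ was modified
        return dict(_private_headers)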
Application Configuration

This specification does not define how a server selects or obtains an
application to invoke. These and other configuration options are
highly server-specific matters. It is expected that server/gateway
authors will document how to configure the server to execute a
particular application object, and with what options (such as
threading options).

Framework authors, on the other hand, should document how to create an
application object that wraps their framework's functionality. The
user, who has chosen both the server and the application framework,
must connect the two together. However, since both the framework and
the server now have a common interface, this should be merely a
mechanical matter, rather than a significant engineering effort for
each new server/framework pair.

Finally, some applications, frameworks, and middleware may wish to use
the environ dictionary to receive simple string configuration options.
Servers and gateways should support this by allowing an application's
deployer to specify name-value pairs to be placed in environ. In the
simplest case, this support can consist merely of copying all
operating system-supplied environment variables from os.environ into
the environ dictionary, since the deployer in principle can configure
these externally to the server, or in the CGI case they can be set via
the server's configuration files.

Applications should try to keep such required variables to a minimum,
since not all servers will support easy configuration of them. Of
course, even in the worst case, persons deploying an application can
create a script to supply the necessary configuration values:

    from the_app import application

    def new_app(environ, start_response):
        environ['the_app.configval1'] = 'something'
        return application(environ, start_response)

But, most existing applications and frameworks will probably only need
a single configuration value from environ, to indicate the location of
their application or framework-specific configuration file(s). (Of
course, applications should cache such configuration, to avoid having
to re-read it upon each invocation.)

URL Reconstruction

If an application wishes to reconstruct a request's complete URL, it
may do so using the following algorithm, contributed by Ian Bicking:

    from urllib.parse import quote
    url = environ['wsgi.url_scheme'] + '://'

    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']

        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME', ''))
    url += quote(environ.get('PATH_INFO', ''))
    if environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']

Note that such a reconstructed URL may not be precisely the same URI
as requested by the client. Server rewrite rules, for example, may
have modified the client's originally requested URL to place it in a
canonical form.

Supporting Older (<2.2) Versions of Python

Some servers, gateways, or applications may wish to support older
(<2.2) versions of Python. This is especially important if Jython is a
target platform, since as of this writing a production-ready version
of Jython 2.2 is not yet available.

For servers and gateways, this is relatively straightforward: servers
and gateways targeting pre-2.2 versions of Python must simply restrict
themselves to using only a standard "for" loop to iterate over any
iterable returned by an application. This is the only way to ensure
source-level compatibility with both the pre-2.2 iterator protocol
(discussed further below) and "today's" iterator protocol (see PEP
234).

(Note that this technique necessarily applies only to servers,
gateways, or middleware that are written in Python. Discussion of how
to use iterator protocol(s) correctly from other languages is outside
the scope of this PEP.)

For applications, supporting pre-2.2 versions of Python is slightly
more complex:

- You may not return a file object and expect it to work as an
  iterable, since before Python 2.2, files were not iterable. (In
  general, you shouldn't do this anyway, because it will perform quite
  poorly most of the time!) Use wsgi.file_wrapper or an
  application-specific file wrapper class.
  (See Optional Platform-Specific File Handling for more on
  wsgi.file_wrapper, and an example class you can use to wrap a file
  as an iterable.)

- If you return a custom iterable, it must implement the pre-2.2
  iterator protocol. That is, provide a __getitem__ method that
  accepts an integer key, and raises IndexError when exhausted. (Note
  that built-in sequence types are also acceptable, since they also
  implement this protocol.)

Finally, middleware that wishes to support pre-2.2 versions of Python,
and iterates over application return values or itself returns an
iterable (or both), must follow the appropriate recommendations above.

(Note: It should go without saying that to support pre-2.2 versions of
Python, any server, gateway, application, or middleware must also use
only language features available in the target version, use 1 and 0
instead of True and False, etc.)

Optional Platform-Specific File Handling

Some operating environments provide special high-performance
file-transmission facilities, such as the Unix sendfile() call.
Servers and gateways may expose this functionality via an optional
wsgi.file_wrapper key in the environ. An application may use this
"file wrapper" to convert a file or file-like object into an iterable
that it then returns, e.g.:

    if 'wsgi.file_wrapper' in environ:
        return environ['wsgi.file_wrapper'](filelike, block_size)
    else:
        return iter(lambda: filelike.read(block_size), '')

If the server or gateway supplies wsgi.file_wrapper, it must be a
callable that accepts one required positional parameter, and one
optional positional parameter. The first parameter is the file-like
object to be sent, and the second parameter is an optional block size
"suggestion" (which the server/gateway need not use). The callable
must return an iterable object, and must not perform any data
transmission until and unless the server/gateway actually receives the
iterable as a return value from the application. (To do otherwise
would prevent middleware from being able to interpret or override the
response data.)

To be considered "file-like", the object supplied by the application
must have a read() method that takes an optional size argument. It may
have a close() method, and if so, the iterable returned by
wsgi.file_wrapper must have a close() method that invokes the original
file-like object's close() method. If the "file-like" object has any
other methods or attributes with names matching those of Python
built-in file objects (e.g. fileno()), the wsgi.file_wrapper may
assume that these methods or attributes have the same semantics as
those of a built-in file object.

The actual implementation of any platform-specific file handling must
occur after the application returns, and the server or gateway checks
to see if a wrapper object was returned. (Again, because of the
presence of middleware, error handlers, and the like, it is not
guaranteed that any wrapper created will actually be used.)

Apart from the handling of close(), the semantics of returning a file
wrapper from the application should be the same as if the application
had returned iter(filelike.read, ''). In other words, transmission
should begin at the current position within the "file" at the time
that transmission begins, and continue until the end is reached, or
until Content-Length bytes have been written.
(If the application doesn't supply a Content-Length, the server may
generate one from the file using its knowledge of the underlying file
implementation.)

Of course, platform-specific file transmission APIs don't usually
accept arbitrary "file-like" objects. Therefore, a wsgi.file_wrapper
has to introspect the supplied object for things such as a fileno()
(Unix-like OSes) or a java.nio.FileChannel (under Jython) in order to
determine if the file-like object is suitable for use with the
platform-specific API it supports.

Note that even if the object is not suitable for the platform API, the
wsgi.file_wrapper must still return an iterable that wraps read() and
close(), so that applications using file wrappers are portable across
platforms. Here's a simple platform-agnostic file wrapper class,
suitable for old (pre 2.2) and new Pythons alike:

    class FileWrapper:

        def __init__(self, filelike, blksize=8192):
            self.filelike = filelike
            self.blksize = blksize
            if hasattr(filelike, 'close'):
                self.close = filelike.close

        def __getitem__(self, key):
            data = self.filelike.read(self.blksize)
            if data:
                return data
            raise IndexError

and here is a snippet from a server/gateway that uses it to provide
access to a platform-specific API:

    environ['wsgi.file_wrapper'] = FileWrapper
    result = application(environ, start_response)

    try:
        if isinstance(result, FileWrapper):
            # check if result.filelike is usable w/platform-specific
            # API, and if so, use that API to transmit the result.
            # If not, fall through to the normal iterable handling
            # loop below.
            pass

        for data in result:
            pass   # etc.

    finally:
        if hasattr(result, 'close'):
            result.close()

Questions and Answers

1. Why must environ be a dictionary? What's wrong with using a
   subclass?

   The rationale for requiring a dictionary is to maximize portability
   between servers. The alternative would be to define some subset of
   a dictionary's methods as being the standard and portable
   interface. In practice, however, most servers will probably find a
   dictionary adequate to their needs, and thus framework authors will
   come to expect the full set of dictionary features to be available,
   since they will be there more often than not. But, if some server
   chooses not to use a dictionary, then there will be
   interoperability problems despite that server's "conformance" to
   spec. Therefore, making a dictionary mandatory simplifies the
   specification and guarantees interoperability.

   Note that this does not prevent server or framework developers from
   offering specialized services as custom variables inside the
   environ dictionary. This is the recommended approach for offering
   any such value-added services.

2. Why can you call write() and yield bytestrings/return an iterable?
   Shouldn't we pick just one way?

   If we supported only the iteration approach, then current
   frameworks that assume the availability of "push" suffer. But, if
   we only support pushing via write(), then server performance
   suffers for transmission of e.g. large files (if a worker thread
   can't begin work on a new request until all of the output has been
   sent). Thus, this compromise allows an application framework to
   support both approaches, as appropriate, but with only a little
   more burden to the server implementor than a push-only approach
   would require.
3. What's the close() for?

   When writes are done during the execution of an application object,
   the application can ensure that resources are released using a
   try/finally block. But, if the application returns an iterable, any
   resources used will not be released until the iterable is garbage
   collected. The close() idiom allows an application to release
   critical resources at the end of a request, and it's
   forward-compatible with the support for try/finally in generators
   that's proposed by PEP 325. (A short sketch of this idiom appears
   just after the end of this Q&A list.)

4. Why is this interface so low-level? I want feature X! (e.g.
   cookies, sessions, persistence, ...)

   This isn't Yet Another Python Web Framework. It's just a way for
   frameworks to talk to web servers, and vice versa. If you want
   these features, you need to pick a web framework that provides the
   features you want. And if that framework lets you create a WSGI
   application, you should be able to run it in most WSGI-supporting
   servers. Also, some WSGI servers may offer additional services via
   objects provided in their environ dictionary; see the applicable
   server documentation for details. (Of course, applications that use
   such extensions will not be portable to other WSGI-based servers.)

5. Why use CGI variables instead of good old HTTP headers? And why mix
   them in with WSGI-defined variables?

   Many existing web frameworks are built heavily upon the CGI spec,
   and existing web servers know how to generate CGI variables. In
   contrast, alternative ways of representing inbound HTTP information
   are fragmented and lack market share. Thus, using the CGI
   "standard" seems like a good way to leverage existing
   implementations. As for mixing them with WSGI variables, separating
   them would just require two dictionary arguments to be passed
   around, while providing no real benefits.

6. What about the status string? Can't we just use the number, passing
   in 200 instead of "200 OK"?

   Doing this would complicate the server or gateway, by requiring
   them to have a table of numeric statuses and corresponding
   messages. By contrast, it is easy for an application or framework
   author to type the extra text to go with the specific response code
   they are using, and existing frameworks often already have a table
   containing the needed messages. So, on balance it seems better to
   make the application/framework responsible, rather than the server
   or gateway.

7. Why is wsgi.run_once not guaranteed to run the app only once?

   Because it's merely a suggestion to the application that it should
   "rig for infrequent running". This is intended for application
   frameworks that have multiple modes of operation for caching,
   sessions, and so forth. In a "multiple run" mode, such frameworks
   may preload caches, and may not write e.g. logs or session data to
   disk after each request. In "single run" mode, such frameworks
   avoid preloading and flush all necessary writes after each request.

   However, in order to test an application or framework to verify
   correct operation in the latter mode, it may be necessary (or at
   least expedient) to invoke it more than once. Therefore, an
   application should not assume that it will definitely not be run
   again, just because it is called with wsgi.run_once set to True.

8. Feature X (dictionaries, callables, etc.)
   are ugly for use in application code; why don't we use objects
   instead?

   All of these implementation choices of WSGI are specifically
   intended to decouple features from one another; recombining these
   features into encapsulated objects makes it somewhat harder to
   write servers or gateways, and an order of magnitude harder to
   write middleware that replaces or modifies only small portions of
   the overall functionality.

   In essence, middleware wants to have a "Chain of Responsibility"
   pattern, whereby it can act as a "handler" for some functions,
   while allowing others to remain unchanged. This is difficult to do
   with ordinary Python objects, if the interface is to remain
   extensible. For example, one must use __getattr__ or
   __getattribute__ overrides, to ensure that extensions (such as
   attributes defined by future WSGI versions) are passed through.

   This type of code is notoriously difficult to get 100% correct, and
   few people will want to write it themselves. They will therefore
   copy other people's implementations, but fail to update them when
   the person they copied from corrects yet another corner case.

   Further, this necessary boilerplate would be pure excise, a
   developer tax paid by middleware developers to support a slightly
   prettier API for application framework developers. But, application
   framework developers will typically only be updating one framework
   to support WSGI, and in a very limited part of their framework as a
   whole. It will likely be their first (and maybe their only) WSGI
   implementation, and thus they will likely implement with this
   specification ready to hand. Thus, the effort of making the API
   "prettier" with object attributes and suchlike would likely be
   wasted for this audience.

   We encourage those who want a prettier (or otherwise improved) WSGI
   interface for use in direct web application programming (as opposed
   to web framework development) to develop APIs or frameworks that
   wrap WSGI for convenient use by application developers. In this
   way, WSGI can remain conveniently low-level for server and
   middleware authors, while not being "ugly" for application
   developers.
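As promised in question 3 above, here is a short sketch of the close()
idiom: an application iterable with a close() method, so that the
server or gateway can release the underlying file promptly even if the
client disconnects mid-response (the path and block size are
placeholders):

    class FileBody:
        def __init__(self, path, blksize=8192):
            self.file = open(path, 'rb')
            self.blksize = blksize
        def __iter__(self):
            return iter(lambda: self.file.read(self.blksize), b'')
        def close(self):
            self.file.close()   # invoked by the server/gateway

    def log_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return FileBody("/var/log/example.log")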
Proposed/Under Discussion

These items are currently being discussed on the Web-SIG and
elsewhere, or are on the PEP author's "to-do" list:

- Should wsgi.input be an iterator instead of a file? This would help
  for asynchronous applications and chunked-encoding input streams.
- Optional extensions are being discussed for pausing iteration of an
  application's output until input is available or until a callback
  occurs.
- Add a section about synchronous vs. asynchronous apps and servers,
  the relevant threading models, and issues/design goals in these
  areas.

Acknowledgements

Thanks go to the many folks on the Web-SIG mailing list whose
thoughtful feedback made this revised draft possible. Especially:

- Gregory "Grisha" Trubetskoy, author of mod_python, who beat up on
  the first draft as not offering any advantages over "plain old
  CGI", thus encouraging me to look for a better approach.
- Ian Bicking, who helped nag me into properly specifying the
  multithreading and multiprocess options, as well as badgering me to
  provide a mechanism for servers to supply custom extension data to
  an application.
- Tony Lownds, who came up with the concept of a start_response
  function that took the status and headers, returning a write
  function. His input also guided the design of the exception
  handling facilities, especially in the area of allowing for
  middleware that overrides application error messages.
- Alan Kennedy, whose courageous attempts to implement WSGI-on-Jython
  (well before the spec was finalized) helped to shape the
  "supporting older versions of Python" section, as well as the
  optional wsgi.file_wrapper facility, and some of the early
  bytes/unicode decisions.
- Mark Nottingham, who reviewed the spec extensively for issues with
  HTTP RFC compliance, especially with regard to HTTP/1.1 features
  that I didn't even know existed until he pointed them out.
- Graham Dumpleton, who worked tirelessly (even in the face of my
  laziness and stupidity) to get some sort of Python 3 version of
  WSGI out, who proposed the "native strings" vs. "byte strings"
  concept, and thoughtfully wrestled through a great many HTTP,
  wsgi.input, and other amendments. Most, if not all, of the credit
  for this new PEP belongs to him.

Copyright

This document has been placed in the public domain.

PEP: 293
Title: Codec Error Handling Callbacks
Author: Walter Dörwald
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Jun-2002
Python-Version: 2.3
Post-History: 19-Jun-2002

Abstract

This PEP aims at extending Python's fixed codec error handling schemes
with a more flexible callback based approach.

Python currently uses a fixed set of error handling modes for codecs.
This PEP describes a mechanism which allows Python to use function
callbacks as error handlers. With these more flexible error handlers
it is possible to add new functionality to existing codecs by e.g.
providing fallback solutions or different encodings for cases where
the standard codec mapping does not apply.

Specification

Currently the set of codec error handling algorithms is fixed to
either "strict", "replace" or "ignore" and the semantics of these
algorithms is implemented separately for each codec.

The proposed patch will make the set of error handling algorithms
extensible through a codec error handler registry which maps handler
names to handler functions.
This registry consists of the following two C functions:

    int PyCodec_RegisterError(const char *name, PyObject *error)

    PyObject *PyCodec_LookupError(const char *name)

and their Python counterparts:

    codecs.register_error(name, error)

    codecs.lookup_error(name)

PyCodec_LookupError raises a LookupError if no callback function has
been registered under this name.

Similar to the encoding name registry, there is no way of
unregistering callback functions or iterating through the available
functions.

The callback functions will be used in the following way by the
codecs: when the codec encounters an encoding/decoding error, the
callback function is looked up by name, the information about the
error is stored in an exception object and the callback is called with
this object. The callback returns information about how to proceed (or
raises an exception).

For encoding, the exception object will look like this:

    class UnicodeEncodeError(UnicodeError):
        def __init__(self, encoding, object, start, end, reason):
            UnicodeError.__init__(self,
                ("encoding '%s' can't encode characters "
                 "in positions %d-%d: %s") % (encoding,
                    start, end-1, reason))
            self.encoding = encoding
            self.object = object
            self.start = start
            self.end = end
            self.reason = reason

This type will be implemented in C with the appropriate setter and
getter methods for the attributes, which have the following meaning:

- encoding: The name of the encoding;
- object: The original unicode object for which encode() has been
  called;
- start: The position of the first unencodable character;
- end: (The position of the last unencodable character)+1 (or the
  length of object, if all characters from start to the end of object
  are unencodable);
- reason: The reason why object[start:end] couldn't be encoded.

If object has consecutive unencodable characters, the encoder should
collect those characters for one call to the callback if those
characters can't be encoded for the same reason. The encoder is not
required to implement this behaviour but may call the callback for
every single character; however, it is strongly suggested that the
collecting method is implemented.

The callback must not modify the exception object. If the callback
does not raise an exception (either the one passed in, or a different
one), it must return a tuple:

    (replacement, newpos)

replacement is a unicode object that the encoder will encode and emit
instead of the unencodable object[start:end] part, and newpos
specifies a new position within object, where (after encoding the
replacement) the encoder will continue encoding.

Negative values for newpos are treated as being relative to the end of
object. If newpos is out of bounds the encoder will raise an
IndexError.

If the replacement string itself contains an unencodable character the
encoder raises the exception object (but may set a different reason
string before raising).

Should further encoding errors occur, the encoder is allowed to reuse
the exception object for the next call to the callback. Furthermore,
the encoder is allowed to cache the result of codecs.lookup_error.

If the callback does not know how to handle the exception, it must
raise a TypeError.
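To illustrate the protocol end to end, here is a small hypothetical
handler together with its registration (the handler name and the
underscore replacement policy are invented for this example):

    import codecs

    def underscorereplace(exc):
        if isinstance(exc, UnicodeEncodeError):
            # Replace the whole unencodable run with underscores and
            # resume encoding immediately after it.
            return (u"_" * (exc.end - exc.start), exc.end)
        raise TypeError("can't handle %s" % exc.__class__.__name__)

    codecs.register_error("underscorereplace", underscorereplace)

    print u"sp\u00e4m".encode("ascii", "underscorereplace")
    # prints: sp_m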
Decoding works similarly to encoding, with the following differences:

- The exception class is named UnicodeDecodeError and the attribute
  object is the original 8bit string that the decoder is currently
  decoding.
- The decoder will call the callback with those bytes that constitute
  one undecodable sequence, even if there is more than one undecodable
  sequence that is undecodable for the same reason directly after the
  first one. E.g. for the "unicode-escape" encoding, when decoding the
  illegal string \u00\u01x, the callback will be called twice (once
  for \u00 and once for \u01). This is done to be able to generate the
  correct number of replacement characters.
- The replacement returned from the callback is a unicode object that
  will be emitted by the decoder as-is without further processing
  instead of the undecodable object[start:end] part.

There is a third API that uses the old strict/ignore/replace error
handling scheme:

    PyUnicode_TranslateCharmap/unicode.translate

The proposed patch will enhance PyUnicode_TranslateCharmap, so that it
also supports the callback registry. This has the additional side
effect that PyUnicode_TranslateCharmap will support multi-character
replacement strings (see SF feature request #403100 [1]).

For PyUnicode_TranslateCharmap the exception class will be named
UnicodeTranslateError. PyUnicode_TranslateCharmap will collect all
consecutive untranslatable characters (i.e. those that map to None)
and call the callback with them. The replacement returned from the
callback is a unicode object that will be put in the translated result
as-is, without further processing.

All encoders and decoders are allowed to implement the callback
functionality themselves, if they recognize the callback name (i.e. if
it is a system callback like "strict", "replace" and "ignore").
The proposed patch will add two additional system callback names:
"backslashreplace" and "xmlcharrefreplace", which can be used for
encoding and translating, and which will also be implemented in-place
for all encoders and PyUnicode_TranslateCharmap.

The Python equivalent of these five callbacks will look like this:

    def strict(exc):
        raise exc

    def ignore(exc):
        if isinstance(exc, UnicodeError):
            return (u"", exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def replace(exc):
        if isinstance(exc, UnicodeEncodeError):
            return ((exc.end-exc.start)*u"?", exc.end)
        elif isinstance(exc, UnicodeDecodeError):
            return (u"\ufffd", exc.end)
        elif isinstance(exc, UnicodeTranslateError):
            return ((exc.end-exc.start)*u"\ufffd", exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def backslashreplace(exc):
        if isinstance(exc,
                      (UnicodeEncodeError, UnicodeTranslateError)):
            s = u""
            for c in exc.object[exc.start:exc.end]:
                if ord(c) <= 0xff:
                    s += u"\\x%02x" % ord(c)
                elif ord(c) <= 0xffff:
                    s += u"\\u%04x" % ord(c)
                else:
                    s += u"\\U%08x" % ord(c)
            return (s, exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def xmlcharrefreplace(exc):
        if isinstance(exc,
                      (UnicodeEncodeError, UnicodeTranslateError)):
            s = u""
            for c in exc.object[exc.start:exc.end]:
                s += u"&#%d;" % ord(c)
            return (s, exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

These five callback handlers will also be accessible to Python as
codecs.strict_error, codecs.ignore_error, codecs.replace_error,
codecs.backslashreplace_error and codecs.xmlcharrefreplace_error.
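For example, once the proposed patch is in place, the two new handlers
would behave like this (an interactive sketch):

    >>> u"a\u3042".encode("ascii", "xmlcharrefreplace")
    'a&#12354;'
    >>> u"sp\u00e4m".encode("ascii", "backslashreplace")
    'sp\\xe4m'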
Rationale

Most legacy encodings do not support the full range of Unicode
characters. For these cases many high level protocols support a way of
escaping a Unicode character (e.g. Python itself supports the \x, \u
and \U convention, XML supports character references via &#xxx; etc.).

When implementing such an encoding algorithm, a problem with the
current implementation of the encode method of Unicode objects becomes
apparent: for determining which characters are unencodable by a
certain encoding, every single character has to be tried, because
encode does not provide any information about the location of the
error(s), so

    # (1)
    us = u"xxx"
    s = us.encode(encoding)

has to be replaced by

    # (2)
    us = u"xxx"
    v = []
    for c in us:
        try:
            v.append(c.encode(encoding))
        except UnicodeError:
            v.append("&#%d;" % ord(c))
    s = "".join(v)

This slows down encoding dramatically as now the loop through the
string is done in Python code and no longer in C code.

Furthermore, this solution poses problems with stateful encodings. For
example, UTF-16 uses a Byte Order Mark at the start of the encoded
byte string to specify the byte order. Using (2) with UTF-16 results
in an 8 bit string with a BOM between every character.

To work around this problem, a stream writer - which keeps state
between calls to the encoding function - has to be used:

    # (3)
    us = u"xxx"
    import codecs, cStringIO as StringIO
    writer = codecs.getwriter(encoding)

    v = StringIO.StringIO()
    uv = writer(v)
    for c in us:
        try:
            uv.write(c)
        except UnicodeError:
            uv.write(u"&#%d;" % ord(c))
    s = v.getvalue()

To compare the speed of (1) and (3) the following test script has been
used:

    # (4)
    import time
    us = u"äa"*1000000
    encoding = "ascii"
    import codecs, cStringIO as StringIO

    t1 = time.time()

    s1 = us.encode(encoding, "replace")

    t2 = time.time()

    writer = codecs.getwriter(encoding)

    v = StringIO.StringIO()
    uv = writer(v)
    for c in us:
        try:
            uv.write(c)
        except UnicodeError:
            uv.write(u"?")
    s2 = v.getvalue()

    t3 = time.time()

    assert(s1 == s2)
    print "1:", t2-t1
    print "2:", t3-t2
    print "factor:", (t3-t2)/(t2-t1)

On Linux this gives the following output (with Python 2.3a0):

    1: 0.274321913719
    2: 51.1284689903
    factor: 186.381278466

i.e. (3) is 180 times slower than (1).

Callbacks must be stateless, because as soon as a callback is
registered it is available globally and can be called by multiple
encode() calls. To be able to use stateful callbacks, the errors
parameter for encode/decode/translate would have to be changed from
char * to PyObject *, so that the callback could be used directly,
without the need to register the callback globally. As this requires
changes to lots of C prototypes, this approach was rejected.

Currently all encoding/decoding functions have arguments

    const Py_UNICODE *p, int size

or

    const char *p, int size

to specify the unicode characters/8bit characters to be
encoded/decoded. So in case of an error the codec has to create a new
unicode or str object from these parameters and store it in the
exception object. The callers of these encoding/decoding functions
extract these parameters from str/unicode objects themselves most of
the time, so it could speed up error handling if these objects were
passed directly. As this again requires changes to many C functions,
this approach has been rejected.

For stream readers/writers the errors attribute must be changeable, to
be able to switch between different error handling methods during the
lifetime of the stream reader/writer. This is currently the case for
codecs.StreamReader and codecs.StreamWriter and all their subclasses.
All core codecs and probably most of the third party codecs (e.g.
JapaneseCodecs) derive their stream readers/writers from these
classes, so this already works, but the attribute errors should be
documented as a requirement.

Implementation Notes

A sample implementation is available as SourceForge patch #432401 [2],
including a script for testing the speed of various
string/encoding/error combinations and a test script.

Currently the new exception classes are old style Python classes. This
means that accessing attributes results in a dict lookup. The C API is
implemented in a way that makes it possible to switch to new style
classes behind the scenes, if Exception (and UnicodeError) are changed
to new style classes implemented in C for improved performance.

The class codecs.StreamReaderWriter uses the errors parameter for both
reading and writing.
To be more flexible, this should probably be changed to two separate
parameters for reading and writing.

The errors parameter of PyUnicode_TranslateCharmap is not available to
Python, which makes testing of the new functionality of
PyUnicode_TranslateCharmap impossible from Python scripts. The patch
should add an optional argument errors to unicode.translate to expose
the functionality and make testing possible.

Codecs that do something different than encoding/decoding from/to
unicode and want to use the new machinery can define their own
exception classes, and the strict handlers will automatically work
with them. The other predefined error handlers are unicode specific
and expect to get a Unicode(Encode|Decode|Translate)Error exception
object, so they won't work.

Backwards Compatibility

The semantics of unicode.encode with errors="replace" has changed: the
old version always stored a ? character in the output string even if
no character was mapped to ? in the mapping. With the proposed patch,
the replacement string from the callback will again be looked up in
the mapping dictionary. But as all supported encodings are ASCII
based, and thus map ? to ?, this should not be a problem in practice.

Illegal values for the errors argument raised ValueError before; now
they will raise LookupError.

References

[1] SF feature request #403100 "Multicharacter replacements in
    PyUnicode_TranslateCharmap"
    (https://bugs.python.org/issue403100)

[2] SF patch #432401 "unicode encoding error callbacks"
    (https://bugs.python.org/issue432401)

Copyright

This document has been placed in the public domain.

PEP: 255
Title: Simple Generators
Author: Neil Schemenauer, Tim Peters, Magnus Lie Hetland
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Requires: 234
Created: 18-May-2001
Python-Version: 2.2
Post-History: 14-Jun-2001, 23-Jun-2001

Abstract

This PEP introduces the concept of generators to Python, as well as a
new statement used in conjunction with them, the yield statement.

Motivation

When a producer function has a hard enough job that it requires
maintaining state between values produced, most programming languages
offer no pleasant and efficient solution beyond adding a callback
function to the producer's argument list, to be called with each value
produced.

For example, tokenize.py in the standard library takes this approach:
the caller must pass a tokeneater function to tokenize(), called
whenever tokenize() finds the next token. This allows tokenize to be
coded in a natural way, but programs calling tokenize are typically
convoluted by the need to remember between callbacks which token(s)
were seen last. The tokeneater function in tabnanny.py is a good
example of that, maintaining a state machine in global variables, to
remember across callbacks what it has already seen and what it hopes
to see next. This was difficult to get working correctly, and is still
difficult for people to understand.
Unfortunately, that's typical of this approach.

An alternative would have been for tokenize to produce an entire parse
of the Python program at once, in a large list. Then tokenize clients
could be written in a natural way, using local variables and local
control flow (such as loops and nested if statements) to keep track of
their state. But this isn't practical: programs can be very large, so
no a priori bound can be placed on the memory needed to materialize
the whole parse; and some tokenize clients only want to see whether
something specific appears early in the program (e.g., a future
statement, or, as is done in IDLE, just the first indented statement),
and then parsing the whole program first is a severe waste of time.

Another alternative would be to make tokenize an iterator (PEP 234),
delivering the next token whenever its .next() method is invoked. This
is pleasant for the caller in the same way a large list of results
would be, but without the memory and "what if I want to get out
early?" drawbacks. However, this shifts the burden onto tokenize to
remember its state between .next() invocations, and the reader need
only glance at tokenize.tokenize_loop() to realize what a horrid chore
that would be. Or picture a recursive algorithm for producing the
nodes of a general tree structure: to cast that into an iterator
framework requires removing the recursion manually and maintaining the
state of the traversal by hand.

A fourth option is to run the producer and consumer in separate
threads. This allows both to maintain their states in natural ways,
and so is pleasant for both. Indeed, Demo/threads/Generator.py in the
Python source distribution provides a usable
synchronized-communication class for doing that in a general way. This
doesn't work on platforms without threads, though, and is very slow on
platforms that do (compared to what is achievable without threads).

A final option is to use the Stackless [1] (PEP 219) variant
implementation of Python instead, which supports lightweight
coroutines. This has much the same programmatic benefits as the thread
option, but is much more efficient. However, Stackless is a
controversial rethinking of the Python core, and it may not be
possible for Jython to implement the same semantics. This PEP isn't
the place to debate that, so suffice it to say here that generators
provide a useful subset of Stackless functionality in a way that fits
easily into the current CPython implementation, and is believed to be
relatively straightforward for other Python implementations.

That exhausts the current alternatives. Some other high-level
languages provide pleasant solutions, notably iterators in Sather [2],
which were inspired by iterators in CLU; and generators in Icon [3], a
novel language where every expression is a generator. There are
differences among these, but the basic idea is the same: provide a
kind of function that can return an intermediate result ("the next
value") to its caller, but maintaining the function's local state so
that the function can be resumed again right where it left off. A very
simple example:

    def fib():
        a, b = 0, 1
        while 1:
            yield b
            a, b = b, a+b

When fib() is first invoked, it sets a to 0 and b to 1, then yields b
back to its caller. The caller sees 1. When fib is resumed, from its
point of view the yield statement is really the same as, say, a print
statement: fib continues after the yield with all local state intact.
a and b then become 1 and 1, and fib loops back to the yield, yielding
1 to its invoker. And so on. From fib's point of view it's just
delivering a sequence of results, as if via callback. But from its
caller's point of view, the fib invocation is an iterable object that
can be resumed at will. As in the thread approach, this allows both
sides to be coded in the most natural ways; but unlike the thread
approach, this can be done efficiently and on all platforms. Indeed,
resuming a generator should be no more expensive than a function call.
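For example, a caller might drive fib like this (an interactive
sketch; fib is the generator defined above):

    >>> g = fib()
    >>> [g.next() for i in range(7)]
    [1, 1, 2, 3, 5, 8, 13]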
The same kind of approach applies to many producer/consumer functions.
For example, tokenize.py could yield the next token instead of
invoking a callback function with it as argument, and tokenize clients
could iterate over the tokens in a natural way: a Python generator is
a kind of Python iterator (PEP 234), but of an especially powerful
kind.

Specification: Yield

A new statement is introduced:

    yield_stmt: "yield" expression_list

yield is a new keyword, so a future statement (PEP 236) is needed to
phase this in: in the initial release, a module desiring to use
generators must include the line:

    from __future__ import generators

near the top (see PEP 236 for details). Modules using the identifier
yield without a future statement will trigger warnings. In the
following release, yield will be a language keyword and the future
statement will no longer be needed.

The yield statement may only be used inside functions. A function that
contains a yield statement is called a generator function. A generator
function is an ordinary function object in all respects, but has the
new CO_GENERATOR flag set in the code object's co_flags member.

When a generator function is called, the actual arguments are bound to
function-local formal argument names in the usual way, but no code in
the body of the function is executed. Instead a generator-iterator
object is returned; this conforms to the iterator protocol (PEP 234),
so in particular can be used in for-loops in a natural way. Note that
when the intent is clear from context, the unqualified name
"generator" may be used to refer either to a generator-function or a
generator-iterator.
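A small interactive sketch of the point that calling a generator
function executes none of its body:

    >>> def g():
    ...     print "entered"
    ...     yield 1
    >>> it = g()    # returns a generator-iterator; prints nothing
    >>> it.next()   # only now does the body run, up to the yield
    entered
    1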
Each time the .next() method of a generator-iterator is invoked, the
code in the body of the generator-function is executed until a yield
or return statement (see below) is encountered, or until the end of
the body is reached.

If a yield statement is encountered, the state of the function is
frozen, and the value of expression_list is returned to .next()'s
caller. By "frozen" we mean that all local state is retained,
including the current bindings of local variables, the instruction
pointer, and the internal evaluation stack: enough information is
saved so that the next time .next() is invoked, the function can
proceed exactly as if the yield statement were just another external
call.

Restriction: A yield statement is not allowed in the try clause of a
try/finally construct. The difficulty is that there's no guarantee the
generator will ever be resumed, hence no guarantee that the finally
block will ever get executed; that's too much a violation of finally's
purpose to bear.

Restriction: A generator cannot be resumed while it is actively
running:

    >>> def g():
    ...     i = me.next()
    ...     yield i
    >>> me = g()
    >>> me.next()
    Traceback (most recent call last):
     ...
      File "<string>", line 2, in g
    ValueError: generator already executing

Specification: Return

A generator function can also contain return statements of the form:

    return

Note that an expression_list is not allowed on return statements in
the body of a generator (although, of course, they may appear in the
bodies of non-generator functions nested within the generator).

When a return statement is encountered, control proceeds as in any
function return, executing the appropriate finally clauses (if any
exist). Then a StopIteration exception is raised, signalling that the
iterator is exhausted. A StopIteration exception is also raised if
control flows off the end of the generator without an explicit return.

Note that return means "I'm done, and have nothing interesting to
return", for both generator functions and non-generator functions.

Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example:

    >>> def f1():
    ...     try:
    ...         return
    ...     except:
    ...         yield 1
    >>> print list(f1())
    []

because, as in any function, return simply exits, but:

    >>> def f2():
    ...     try:
    ...         raise StopIteration
    ...     except:
    ...         yield 42
    >>> print list(f2())
    [42]

because StopIteration is captured by a bare except, as is any
exception.

Specification: Generators and Exception Propagation

If an unhandled exception -- including, but not limited to,
StopIteration -- is raised by, or passes through, a generator
function, then the exception is passed on to the caller in the usual
way, and subsequent attempts to resume the generator function raise
StopIteration. In other words, an unhandled exception terminates a
generator's useful life.

Example (not idiomatic but to illustrate the point):

    >>> def f():
    ...     return 1/0
    >>> def g():
    ...     yield f()  # the zero division exception propagates
    ...     yield 42   # and we'll never get here
    >>> k = g()
    >>> k.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
      File "<stdin>", line 2, in g
      File "<stdin>", line 2, in f
    ZeroDivisionError: integer division or modulo by zero
    >>> k.next()  # and the generator cannot be resumed
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    StopIteration
    >>>

Specification: Try/Except/Finally

As noted earlier, yield is not allowed in the try clause of a
try/finally construct. A consequence is that generators should
allocate critical resources with great care. There is no restriction
on yield otherwise appearing in finally clauses, except clauses, or in
the try clause of a try/except construct:

    >>> def f():
    ...     try:
    ...         yield 1
    ...         try:
    ...             yield 2
    ...             1/0
    ...             yield 3  # never get here
    ...         except ZeroDivisionError:
    ...             yield 4
    ...             yield 5
    ...             raise
    ...         except:
    ...             yield 6
    ...         yield 7     # the "raise" above stops this
    ...     except:
    ...         yield 8
    ...     yield 9
    ...     try:
    ...         x = 12
    ...     finally:
    ...         yield 10
    >>> def f():
    ...     try:
    ...         yield 1
    ...         try:
    ...             yield 2
    ...             1/0
    ...             yield 3  # never get here
    ...         except ZeroDivisionError:
    ...             yield 4
    ...             yield 5
    ...             raise
    ...         except:
    ...             yield 6
    ...         yield 7  # the "raise" above stops this
    ...     except:
    ...         yield 8
    ...     yield 9
    ...     try:
    ...         x = 12
    ...     finally:
    ...         yield 10
    ...     yield 11
    >>> print list(f())
    [1, 2, 4, 5, 8, 9, 10, 11]
    >>>

Example

    # A binary tree class.
    class Tree:

        def __init__(self, label, left=None, right=None):
            self.label = label
            self.left = left
            self.right = right

        def __repr__(self, level=0, indent="    "):
            s = level*indent + `self.label`
            if self.left:
                s = s + "\n" + self.left.__repr__(level+1, indent)
            if self.right:
                s = s + "\n" + self.right.__repr__(level+1, indent)
            return s

        def __iter__(self):
            return inorder(self)

    # Create a Tree from a list.
    def tree(list):
        n = len(list)
        if n == 0:
            return []
        i = n / 2
        return Tree(list[i], tree(list[:i]), tree(list[i+1:]))

    # A recursive generator that generates Tree labels in in-order.
    def inorder(t):
        if t:
            for x in inorder(t.left):
                yield x
            yield t.label
            for x in inorder(t.right):
                yield x

    # Show it off: create a tree.
    t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    # Print the nodes of the tree in in-order.
    for x in t:
        print x,
    print

    # A non-recursive generator.
    def inorder(node):
        stack = []
        while node:
            while node.left:
                stack.append(node)
                node = node.left
            yield node.label
            while not node.right:
                try:
                    node = stack.pop()
                except IndexError:
                    return
                yield node.label
            node = node.right

    # Exercise the non-recursive generator.
    for x in t:
        print x,
    print

Both output blocks display:

    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z

Q & A

Why not a new keyword instead of reusing def?

See BDFL Pronouncements section below.

Why a new keyword for yield? Why not a builtin function instead?

Control flow is much better expressed via keyword in Python, and yield is a control construct. It's also believed that efficient implementation in Jython requires that the compiler be able to determine potential suspension points at compile-time, and a new keyword makes that easy. The CPython reference implementation also exploits it heavily, to detect which functions are generator-functions (although a new keyword in place of def would solve that for CPython -- but people asking the "why a new keyword?" question don't want any new keyword).

Then why not some other special syntax without a new keyword?

For example, one of these instead of yield 3:

    return 3 and continue
    return and continue 3
    return generating 3
    continue return 3
    return >> , 3
    from generator return 3
    return >> 3
    return << 3
    >> 3
    << 3
    * 3

Did I miss one <wink>? Out of hundreds of messages, I counted three suggesting such an alternative, and extracted the above from them. It would be nice not to need a new keyword, but nicer to make yield very clear -- I don't want to have to deduce that a yield is occurring from making sense of a previously senseless sequence of keywords or operators. Still, if this attracts enough interest, proponents should settle on a single consensus suggestion, and Guido will Pronounce on it.

Why allow return at all? Why not force termination to be spelled raise StopIteration?

The mechanics of StopIteration are low-level details, much like the mechanics of IndexError in Python 2.1: the implementation needs to do something well-defined under the covers, and Python exposes these mechanisms for advanced users. That's not an argument for forcing everyone to work at that level, though. return means "I'm done" in any kind of function, and that's easy to explain and to use.
Note that return isn't always equivalent to raise StopIteration within a try/except construct, either (see the "Specification: Return" section).

Then why not allow an expression on return too?

Perhaps we will someday. In Icon, return expr means both "I'm done", and "but I have one final useful value to return too, and this is it". At the start, and in the absence of compelling uses for return expr, it's simply cleaner to use yield exclusively for delivering values.

BDFL Pronouncements

Issue

Introduce another new keyword (say, gen or generator) in place of def, or otherwise alter the syntax, to distinguish generator-functions from non-generator functions.

Con

In practice (how you think about them), generators are functions, but with the twist that they're resumable. The mechanics of how they're set up is a comparatively minor technical issue, and introducing a new keyword would unhelpfully overemphasize the mechanics of how generators get started (a vital but tiny part of a generator's life).

Pro

In reality (how you think about them), generator-functions are actually factory functions that produce generator-iterators as if by magic. In this respect they're radically different from non-generator functions, acting more like a constructor than a function, so reusing def is at best confusing. A yield statement buried in the body is not enough warning that the semantics are so different.

BDFL

def it stays. No argument on either side is totally convincing, so I have consulted my language designer's intuition. It tells me that the syntax proposed in the PEP is exactly right - not too hot, not too cold. But, like the Oracle at Delphi in Greek mythology, it doesn't tell me why, so I don't have a rebuttal for the arguments against the PEP syntax. The best I can come up with (apart from agreeing with the rebuttals ... already made) is "FUD". If this had been part of the language from day one, I very much doubt it would have made Andrew Kuchling's "Python Warts" page.

Reference Implementation

The current implementation, in a preliminary state (no docs, but well tested and solid), is part of Python's CVS development tree[4]. Using this requires that you build Python from source.
This was derived from an earlier patch by Neil Schemenauer[5].

Footnotes and References

Copyright

This document has been placed in the public domain.

[1] http://www.stackless.com/

[2] "Iteration Abstraction in Sather", Murer, Omohundro, Stoutamire and Szyperski: http://www.icsi.berkeley.edu/~sather/Publications/toplas.html

[3] http://www.cs.arizona.edu/icon/

[4] To experiment with this implementation, check out Python from CVS according to the instructions at http://sf.net/cvs/?group_id=5470. Note that the std test Lib/test/test_generators.py contains many examples, including all those in this PEP.

[5] http://python.ca/nas/python/generator.diff

PEP: 433
Title: Easier suppression of file descriptor inheritance
Version: $Revision$
Last-Modified: $Date$
Author: Victor Stinner
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 10-Jan-2013
Python-Version: 3.4
Superseded-By: 446

Abstract

Add a new optional cloexec parameter on functions creating file descriptors, add different ways to change default values of this parameter, and add four new functions:

- os.get_cloexec(fd)
- os.set_cloexec(fd, cloexec=True)
- sys.getdefaultcloexec()
- sys.setdefaultcloexec(cloexec)

Rationale

A file descriptor has a close-on-exec flag which indicates if the file descriptor will be inherited or not.

On UNIX, if the close-on-exec flag is set, the file descriptor is not inherited: it will be closed at the execution of child processes; otherwise the file descriptor is inherited by child processes.

On Windows, if the close-on-exec flag is set, the file descriptor is not inherited; the file descriptor is inherited by child processes if the close-on-exec flag is cleared and if CreateProcess() is called with the bInheritHandles parameter set to TRUE (when subprocess.Popen is created with close_fds=False, for example). Windows does not have a "close-on-exec" flag, but rather an inheritance flag, which is just the opposite value. For example, setting the close-on-exec flag means clearing the HANDLE_FLAG_INHERIT flag of a handle.
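As an illustration of the flag's semantics (a sketch, not part of this PEP's proposal), the flag can be inspected and set manually on POSIX systems with the fcntl module:

    import fcntl
    import os

    fd = os.open("/etc/hostname", os.O_RDONLY)
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    print(bool(flags & fcntl.FD_CLOEXEC))   # False: inherited by default
    # Set the close-on-exec flag: the descriptor will now be closed in
    # child processes after exec().
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)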
Status in Python 3.3

On UNIX, the subprocess module closes file descriptors greater than 2 by default since Python 3.2[1]. All file descriptors created by the parent process are automatically closed in the child process.

xmlrpc.server.SimpleXMLRPCServer sets the close-on-exec flag of the listening socket, whereas its parent class socketserver.TCPServer does not set this flag.

There are other cases creating a subprocess or executing a new program where file descriptors are not closed: functions of the os.spawn*() and os.exec*() families, and third party modules calling exec() or fork() + exec(). In this case, file descriptors are shared between the parent and the child processes, which is usually unexpected and causes various issues.

This PEP proposes to continue the work started with the change in the subprocess module in Python 3.2, to fix the issue in any code, and not just code using subprocess.

Inherited file descriptors issues

Closing the file descriptor in the parent process does not close the related resource (file, socket, ...) because it is still open in the child process.

The listening socket of TCPServer is not closed on exec(): the child process is able to get connections from new clients; if the parent closes the listening socket and creates a new listening socket on the same address, it would get an "address already in use" error.

Not closing file descriptors can lead to resource exhaustion: even if the parent closes all files, creating a new file descriptor may fail with "too many files" because files are still open in the child process.

See also the following issues:

- Issue #2320: Race condition in subprocess using stdin (2008)
- Issue #3006: subprocess.Popen causes socket to remain open after close (2008)
- Issue #7213: subprocess leaks open file descriptors between Popen instances causing hangs (2009)
- Issue #12786: subprocess wait() hangs when stdin is closed (2011)

Security

Leaking file descriptors is a major security vulnerability. An untrusted child process can read sensitive data like passwords and take control of the parent process through leaked file descriptors. It is, for example, a known technique for escaping from a chroot.

See also the CERT recommendation: FIO42-C. Ensure files are properly closed when they are no longer needed.

Examples of vulnerabilities:

- OpenSSH Security Advisory: portable-keysign-rand-helper.adv (April 2011)
- CWE-403: Exposure of File Descriptor to Unintended Control Sphere (2008)
- Hijacking Apache https by mod_php (Dec 2003)
  - Apache: Apr should set FD_CLOEXEC if APR_FOPEN_NOCLEANUP is not set (fixed in 2009)
  - PHP: system() (and similar) don't cleanup opened handles of Apache (not fixed in January 2013)

Atomicity

Using fcntl() to set the close-on-exec flag is not safe in a multithreaded application. If a thread calls fork() and exec() between the creation of the file descriptor and the call to fcntl(fd, F_SETFD, new_flags), the file descriptor will be inherited by the child process. Modern operating systems offer functions to set the flag during the creation of the file descriptor, which avoids the race condition.

Portability

Python 3.2 added the socket.SOCK_CLOEXEC flag; Python 3.3 added the os.O_CLOEXEC flag and the os.pipe2() function. It is therefore already possible in Python 3.3 to set the close-on-exec flag atomically when opening a file and when creating a pipe or socket.

The problem is that these flags and functions are not portable: only recent versions of operating systems support them. The O_CLOEXEC and SOCK_CLOEXEC flags are ignored by old Linux versions, so the FD_CLOEXEC flag must be checked using fcntl(fd, F_GETFD). If the kernel ignores the O_CLOEXEC or SOCK_CLOEXEC flag, a call to fcntl(fd, F_SETFD, flags) is required to set the close-on-exec flag.

Note

OpenBSD older than 5.2 does not close the file descriptor with the close-on-exec flag set if fork() is used before exec(), but it works correctly if exec() is called without fork(). Try openbsd_bug.py.
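The check-and-fallback dance this implies can be sketched as follows (open_cloexec is a hypothetical helper, assuming a POSIX system):

    import fcntl
    import os

    def open_cloexec(path):
        # Old Linux kernels silently ignore O_CLOEXEC, so the result must
        # be verified with fcntl() and fixed up if needed (non-atomic).
        fd = os.open(path, os.O_RDONLY | getattr(os, "O_CLOEXEC", 0))
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        if not flags & fcntl.FD_CLOEXEC:
            fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        return fd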
Scope

Applications still have to explicitly close file descriptors after a fork(). The close-on-exec flag only closes file descriptors after exec(), and so after fork() + exec().

This PEP only changes the close-on-exec flag of file descriptors created by the Python standard library, or by modules using the standard library. Third party modules not using the standard library should be modified to conform to this PEP. The new os.set_cloexec() function can be used, for example.

Note

See Close file descriptors after fork for a possible solution for fork() without exec().

Proposal

Add a new optional cloexec parameter on functions creating file descriptors and different ways to change the default value of this parameter.

Add new functions:

- os.get_cloexec(fd:int) -> bool: get the close-on-exec flag of a file descriptor. Not available on all platforms.
- os.set_cloexec(fd:int, cloexec:bool=True): set or clear the close-on-exec flag on a file descriptor. Not available on all platforms.
- sys.getdefaultcloexec() -> bool: get the current default value of the cloexec parameter
- sys.setdefaultcloexec(cloexec: bool): set the default value of the cloexec parameter

Add a new optional cloexec parameter to:

- asyncore.dispatcher.create_socket()
- io.FileIO
- io.open()
- open()
- os.dup()
- os.dup2()
- os.fdopen()
- os.open()
- os.openpty()
- os.pipe()
- select.devpoll()
- select.epoll()
- select.kqueue()
- socket.socket()
- socket.socket.accept()
- socket.socket.dup()
- socket.socket.fromfd
- socket.socketpair()

The default value of the cloexec parameter is sys.getdefaultcloexec().

Add a new command line option -e and an environment variable PYTHONCLOEXEC to set the close-on-exec flag by default.

subprocess clears the close-on-exec flag of file descriptors of the pass_fds parameter.

All functions creating file descriptors in the standard library must respect the default value of the cloexec parameter: sys.getdefaultcloexec().

File descriptors 0 (stdin), 1 (stdout) and 2 (stderr) are expected to be inherited, but Python does not handle them differently. When os.dup2() is used to replace standard streams, cloexec=False must be specified explicitly.
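If this proposal were implemented, typical usage could look like the following sketch (hypothetical: the cloexec parameters and the new functions shown here were never added to Python):

    import os
    import sys

    sys.setdefaultcloexec(True)              # opt in for the whole process
    fd = os.open("data.bin", os.O_RDONLY)    # close-on-exec set by default
    r, w = os.pipe(cloexec=False)            # explicitly inheritable pipe
    assert os.get_cloexec(fd)
    assert not os.get_cloexec(r)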
Drawbacks of the proposal:

- It is no longer possible to know whether the close-on-exec flag will be set on a newly created file descriptor just by reading the source code.
- If the inheritance of a file descriptor matters, the cloexec parameter must now be specified explicitly, or the library or the application will not work depending on the default value of the cloexec parameter.

Alternatives

Inheritance enabled by default, default not configurable

Add a new optional parameter cloexec on functions creating file descriptors. The default value of the cloexec parameter is False, and this default cannot be changed. File descriptor inheritance enabled by default is also the default on POSIX and on Windows. This alternative is the most conservative option.

This option does not solve the issues listed in the Rationale section, it only provides a helper to fix them. All functions creating file descriptors would have to be modified to set cloexec=True in each module used by an application to fix all these issues.

Inheritance enabled by default, default can only be set to True

This alternative is based on the proposal: the only difference is that sys.setdefaultcloexec() does not take any argument, it can only be used to set the default value of the cloexec parameter to True.

Disable inheritance by default

This alternative is based on the proposal: the only difference is that the default value of the cloexec parameter is True (instead of False).

If a file must be inherited by child processes, the cloexec=False parameter can be used.

Advantages of setting the close-on-exec flag by default:

- There are far more programs that are bitten by FD inheritance upon exec (see Inherited file descriptors issues and Security) than programs relying on it (see Applications using inheritance of file descriptors).

Drawbacks of setting the close-on-exec flag by default:

- It violates the principle of least surprise. Developers using the os module may expect that Python respects the POSIX standard, and so that the close-on-exec flag is not set by default.
- The os module is written as a thin wrapper to system calls (to functions of the C standard library). If atomic flags to set the close-on-exec flag are not supported (see Appendix: Operating system support), a single Python function call may call 2 or 3 system calls (see the Performances section).
- Extra system calls, if any, may slow down Python: see Performances.

Backward compatibility: only a few programs rely on inheritance of file descriptors, and they only pass a few file descriptors, usually just one. These programs will fail immediately with an EBADF error, and it will be simple to fix them: add the cloexec=False parameter or use os.set_cloexec(fd, False).

The subprocess module will be changed anyway to clear the close-on-exec flag on file descriptors listed in the pass_fds parameter of the Popen constructor. So it is possible that these programs will not need any fix if they use the subprocess module.

Close file descriptors after fork

This PEP does not fix issues with applications using fork() without exec(). Python needs a generic mechanism to register callbacks which would be called after a fork, see #16500: Add an atfork module. Such a registry could be used to close file descriptors just after a fork() (see the sketch after the drawbacks below).

Drawbacks:

- It does not solve the problem on Windows: fork() does not exist on Windows
- This alternative does not solve the problem for programs using exec() without fork().
- A third party module may call directly the C function fork() which will not call "atfork" callbacks.
- All functions creating file descriptors must be changed to register a callback and then unregister their callback when the file is closed. Or a list of all open file descriptors must be maintained.
- The operating system is a better place than Python to close automatically file descriptors. For example, it is not easy to avoid a race condition between closing the file and unregistering the callback closing the file.
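For reference, CPython later gained such a registry as os.register_at_fork() in Python 3.7, well after this PEP. A sketch of how callbacks could close descriptors in the child (fds_to_close is an illustrative application-level list, not a real API):

    import os

    fds_to_close = []    # descriptors the application wants child-private

    def _close_in_child():
        for fd in fds_to_close:
            try:
                os.close(fd)
            except OSError:
                pass

    # Run the callback in the child process after every fork().
    os.register_at_fork(after_in_child=_close_in_child)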
open(): add "e" flag to mode

A new "e" mode would set the close-on-exec flag (best-effort).

This alternative only solves the problem for open(); socket.socket() and os.pipe() do not have a mode parameter, for example.

Since its version 2.7, the GNU libc supports the "e" flag for fopen(). It uses O_CLOEXEC if available, or falls back to fcntl(fd, F_SETFD, FD_CLOEXEC). With Visual Studio, fopen() accepts a "N" flag which uses O_NOINHERIT.

Bikeshedding on the name of the new parameter

- inherit, inherited: closer to the Windows definition
- sensitive
- sterile: "Does not produce offspring."

Applications using inheritance of file descriptors

Most developers don't know that file descriptors are inherited by default. Most programs do not rely on inheritance of file descriptors. For example, subprocess.Popen was changed in Python 3.2 to close all file descriptors greater than 2 in the child process by default. No user complained about this behavior change yet.

Network servers using fork may want to pass the client socket to the child process. For example, on UNIX a CGI server passes the client socket through file descriptors 0 (stdin) and 1 (stdout) using dup2().

To access a restricted resource, like creating a socket listening on a TCP port lower than 1024 or reading a file containing sensitive data like passwords, a common practice is: start as the root user, create a file descriptor, create a child process, drop privileges (e.g. change the current user), pass the file descriptor to the child process and exit the parent process.

Security is very important in such a use case: leaking another file descriptor would be a critical security vulnerability (see Security). The root process may not exit but monitor the child process instead, and restart a new child process and pass the same file descriptor if the previous child process crashed.

Examples of programs taking file descriptors from the parent process using a command line option:

- gpg: --status-fd <fd>, --logger-fd <fd>, etc.
- openssl: -pass fd:<fd>
- qemu: -add-fd <fd>
- valgrind: --log-fd=<fd>, --input-fd=<fd>, etc.
- xterm: -S <fd>

On Linux, it is possible to use a "/dev/fd/<fd>" filename to pass a file descriptor to a program expecting a filename.

Performances

Setting the close-on-exec flag may require additional system calls for each creation of new file descriptors. The number of additional system calls depends on the method used to set the flag:

- O_NOINHERIT: no additional system call
- O_CLOEXEC: one additional system call, but only at the creation of the first file descriptor, to check if the flag is supported. If the flag is not supported, Python has to fall back to the next method.
- ioctl(fd, FIOCLEX): one additional system call per file descriptor
- fcntl(fd, F_SETFD, flags): two additional system calls per file descriptor, one to get the old flags and one to set the new flags

On Linux, setting the close-on-exec flag has a low overhead on performances. Results of bench_cloexec.py on Linux 3.6:

- close-on-exec flag not set: 7.8 us
- O_CLOEXEC: 1% slower (7.9 us)
- ioctl(): 3% slower (8.0 us)
- fcntl(): 3% slower (8.0 us)

Implementation

os.get_cloexec(fd)

Get the close-on-exec flag of a file descriptor.

Pseudo-code:

    if os.name == 'nt':
        def get_cloexec(fd):
            handle = _winapi._get_osfhandle(fd)
            flags = _winapi.GetHandleInformation(handle)
            return not (flags & _winapi.HANDLE_FLAG_INHERIT)
    else:
        try:
            import fcntl
        except ImportError:
            pass
        else:
            def get_cloexec(fd):
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                return bool(flags & fcntl.FD_CLOEXEC)

os.set_cloexec(fd, cloexec=True)

Set or clear the close-on-exec flag on a file descriptor. The flag is set after the creation of the file descriptor, and so it is not atomic.
Pseudo-code:

    if os.name == 'nt':
        def set_cloexec(fd, cloexec=True):
            handle = _winapi._get_osfhandle(fd)
            mask = _winapi.HANDLE_FLAG_INHERIT
            if cloexec:
                flags = 0
            else:
                flags = mask
            _winapi.SetHandleInformation(handle, mask, flags)
    else:
        # 'ioctl' below stands for a hypothetical module exposing ioctl()
        # and the FIOCLEX/FIONCLEX constants.
        fcntl = None
        ioctl = None
        try:
            import ioctl
        except ImportError:
            try:
                import fcntl
            except ImportError:
                pass
        if ioctl is not None and hasattr(ioctl, 'FIOCLEX'):
            def set_cloexec(fd, cloexec=True):
                if cloexec:
                    ioctl.ioctl(fd, ioctl.FIOCLEX)
                else:
                    ioctl.ioctl(fd, ioctl.FIONCLEX)
        elif fcntl is not None:
            def set_cloexec(fd, cloexec=True):
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                if cloexec:
                    flags |= fcntl.FD_CLOEXEC
                else:
                    flags &= ~fcntl.FD_CLOEXEC
                fcntl.fcntl(fd, fcntl.F_SETFD, flags)

ioctl is preferred over fcntl because it requires only one syscall, instead of two syscalls for fcntl.

Note

fcntl(fd, F_SETFD, flags) only supports one flag (FD_CLOEXEC), so it would be possible to avoid fcntl(fd, F_GETFD). But it may drop other flags in the future, and so it is safer to keep the two function calls.

Note

The fopen() function of the GNU libc ignores the error if fcntl(fd, F_SETFD, flags) failed.

open()

- Windows: open() with the O_NOINHERIT flag [atomic]
- open() with the O_CLOEXEC flag [atomic]
- open() + os.set_cloexec(fd, True) [best-effort]

os.dup()

- Windows: DuplicateHandle() [atomic]
- fcntl(fd, F_DUPFD_CLOEXEC) [atomic]
- dup() + os.set_cloexec(fd, True) [best-effort]

os.dup2()

- fcntl(fd, F_DUP2FD_CLOEXEC, fd2) [atomic]
- dup3() with the O_CLOEXEC flag [atomic]
- dup2() + os.set_cloexec(fd, True) [best-effort]

os.pipe()

- Windows: CreatePipe() with SECURITY_ATTRIBUTES.bInheritHandle=FALSE, or _pipe() with the O_NOINHERIT flag [atomic]
- pipe2() with the O_CLOEXEC flag [atomic]
- pipe() + os.set_cloexec(fd, True) [best-effort]

socket.socket()

- Windows: WSASocket() with the WSA_FLAG_NO_HANDLE_INHERIT flag [atomic]
- socket() with the SOCK_CLOEXEC flag [atomic]
- socket() + os.set_cloexec(fd, True) [best-effort]

socket.socketpair()

- socketpair() with the SOCK_CLOEXEC flag [atomic]
- socketpair() + os.set_cloexec(fd, True) [best-effort]

socket.socket.accept()

- accept4() with the SOCK_CLOEXEC flag [atomic]
- accept() + os.set_cloexec(fd, True) [best-effort]
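Several of the atomic variants listed above are already reachable from Python 3.3 on sufficiently recent Linux kernels; a short POSIX-only demonstration (a sketch, not part of the proposal):

    import fcntl
    import os
    import socket

    r, w = os.pipe2(os.O_CLOEXEC)             # atomic since Linux 2.6.27
    fd = os.open("/etc/hostname", os.O_RDONLY | os.O_CLOEXEC)
    s = socket.socket(socket.AF_INET,
                      socket.SOCK_STREAM | socket.SOCK_CLOEXEC)
    # 1 if the kernel honored O_CLOEXEC, 0 if it silently ignored it.
    print(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)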
Backward compatibility

There is no backward incompatible change. The default behaviour is unchanged: the close-on-exec flag is not set by default.

Appendix: Operating system support

Windows

Windows has an O_NOINHERIT flag: "Do not inherit in child processes". For example, it is supported by open() and _pipe().

The flag can be cleared using SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0).

CreateProcess() has a bInheritHandles parameter: if it is FALSE, the handles are not inherited. If it is TRUE, handles with the HANDLE_FLAG_INHERIT flag set are inherited. subprocess.Popen uses the close_fds option to define bInheritHandles.

ioctl

Functions:

- ioctl(fd, FIOCLEX, 0): set the close-on-exec flag
- ioctl(fd, FIONCLEX, 0): clear the close-on-exec flag

Availability: Linux, Mac OS X, QNX, NetBSD, OpenBSD, FreeBSD.

fcntl

Functions:

- flags = fcntl(fd, F_GETFD); fcntl(fd, F_SETFD, flags | FD_CLOEXEC): set the close-on-exec flag
- flags = fcntl(fd, F_GETFD); fcntl(fd, F_SETFD, flags & ~FD_CLOEXEC): clear the close-on-exec flag

Availability: AIX, Digital UNIX, FreeBSD, HP-UX, IRIX, Linux, Mac OS X, OpenBSD, Solaris, SunOS, Unicos.

Atomic flags

New flags:

- O_CLOEXEC: available on Linux (2.6.23), FreeBSD (8.3), OpenBSD 5.0, Solaris 11, QNX, BeOS, next NetBSD release (6.1?). This flag is part of POSIX.1-2008.
- SOCK_CLOEXEC flag for socket() and socketpair(), available on Linux 2.6.27, OpenBSD 5.2, NetBSD 6.0.
- WSA_FLAG_NO_HANDLE_INHERIT flag for WSASocket(): supported on Windows 7 with SP1, Windows Server 2008 R2 with SP1, and later
- fcntl(): F_DUPFD_CLOEXEC flag, available on Linux 2.6.24, OpenBSD 5.0, FreeBSD 9.1, NetBSD 6.0, Solaris 11. This flag is part of POSIX.1-2008.
- fcntl(): F_DUP2FD_CLOEXEC flag, available on FreeBSD 9.1 and Solaris 11.
- recvmsg(): MSG_CMSG_CLOEXEC, available on Linux 2.6.23, NetBSD 6.0.

On Linux older than 2.6.23, the O_CLOEXEC flag is simply ignored. So we have to check that the flag is supported by calling fcntl(). If it does not work, we have to set the flag using ioctl() or fcntl().

On Linux older than 2.6.27, if the SOCK_CLOEXEC flag is set in the socket type, socket() or socketpair() fail and errno is set to EINVAL.

On Windows XP SP3, WSASocket() fails with WSAEPROTOTYPE when the WSA_FLAG_NO_HANDLE_INHERIT flag is used.

New functions:

- dup3(): available on Linux 2.6.27 (and glibc 2.9)
- pipe2(): available on Linux 2.6.27 (and glibc 2.9)
- accept4(): available on Linux 2.6.28 (and glibc 2.10)

If accept4() is called on Linux older than 2.6.28, accept4() returns -1 (fail) and errno is set to ENOSYS.

Links

Links:

- Secure File Descriptor Handling (Ulrich Drepper, 2008)
- win32_support.py of the Tornado project: emulates fcntl(fd, F_SETFD, FD_CLOEXEC) using SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 1)
- LKML: [PATCH] nextfd(2)

Python issues:

- #10115: Support accept4() for atomic setting of flags at socket creation
- #12105: open() does not able to set flags, such as O_CLOEXEC
- #12107: TCP listening sockets created without FD_CLOEXEC flag
- #16500: Add an atfork module
- #16850: Add "e" mode to open(): close-and-exec (O_CLOEXEC) / O_NOINHERIT
- #16860: Use O_CLOEXEC in the tempfile module
- #17036: Implementation of the PEP 433
- #16946: subprocess: _close_open_fd_range_safe() does not set close-on-exec flag on Linux < 2.6.23 if O_CLOEXEC is defined
- #17070: PEP 433: Use the new cloexec to improve security and avoid bugs

Other languages:

- Perl sets the close-on-exec flag on newly created file descriptors if their number is greater than $SYSTEM_FD_MAX ($^F). See the $SYSTEM_FD_MAX documentation. Perl does this since the creation of Perl (it was already present in Perl 1).
- Ruby: Set FD_CLOEXEC for all fds (except 0, 1, 2)
- Ruby: O_CLOEXEC flag missing for Kernel::open: the commit was reverted later
- OCaml: PR#5256: Processes opened using Unix.open_process* inherit all opened file descriptors (including sockets).
OCaml has a Unix.set_close_on_exec function.

Footnotes

Copyright

This document has been placed in the public domain.

[1] On UNIX since Python 3.2, subprocess.Popen() closes all file descriptors by default: close_fds=True. It closes file descriptors in range 3 inclusive to local_max_fd exclusive, where local_max_fd is fcntl(0, F_MAXFD) on NetBSD, or sysconf(_SC_OPEN_MAX) otherwise. If the error pipe has a descriptor smaller than 3, ValueError is raised.

PEP: 661
Title: Sentinel Values
Author: Tal Einat
Discussions-To: https://discuss.python.org/t/pep-661-sentinel-values/9126
Status: Draft
Type: Standards Track
Content-Type: text/x-rst
Created: 06-Jun-2021
Post-History: 20-May-2021, 06-Jun-2021

TL;DR: See the Specification and Reference Implementation.

Abstract

Unique placeholder values, commonly known as "sentinel values", are common in programming. They have many uses, such as for:

- Default values for function arguments, for when a value was not given:

      def foo(value=None):
          ...

- Return values from functions when something is not found or unavailable:

      >>> "abc".find("d")
      -1

- Missing data, such as NULL in relational databases or "N/A" ("not available") in spreadsheets

Python has the special value None, which is intended to be used as such a sentinel value in most cases. However, sometimes an alternative sentinel value is needed, usually when it needs to be distinct from None since None is a valid value in that context. Such cases are common enough that several idioms for implementing such sentinels have arisen over the years, but uncommon enough that there hasn't been a clear need for standardization. However, the common implementations, including some in the stdlib, suffer from several significant drawbacks.

This PEP proposes adding a utility for defining sentinel values, to be used in the stdlib and made publicly available as part of the stdlib.

Note: Changing all existing sentinels in the stdlib to be implemented this way is not deemed necessary, and whether to do so is left to the discretion of the maintainers.

Motivation

In May 2021, a question was brought up on the python-dev mailing list[1] about how to better implement a sentinel value for traceback.print_exception. The existing implementation used the following common idiom:

    _sentinel = object()

However, this object has an uninformative and overly verbose repr, causing the function's signature to be overly long and hard to read:

    >>> help(traceback.print_exception)
    Help on function print_exception in module traceback:

    print_exception(exc, /, value=<object object at 0x...>,
        tb=<object object at 0x...>, limit=None, file=None, chain=True)

Additionally, two other drawbacks of many existing sentinels were brought up in the discussion:
1. Some do not have a distinct type, hence it is impossible to define clear type signatures for functions with such sentinels as default values.
2. They behave unexpectedly after being copied or unpickled, due to a separate instance being created and thus comparisons using is failing.
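The second drawback is easy to demonstrate (a quick illustration, not taken from the original discussion):

    >>> import copy, pickle
    >>> MISSING = object()
    >>> copy.deepcopy(MISSING) is MISSING
    False
    >>> pickle.loads(pickle.dumps(MISSING)) is MISSING
    False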
In the ensuing discussion, Victor Stinner supplied a list of currently used sentinel values in the Python standard library[2]. This showed that the need for sentinels is fairly common, that there are various implementation methods used even within the stdlib, and that many of these suffer from at least one of the three above drawbacks.

The discussion did not lead to any clear consensus on whether a standard implementation method is needed or desirable, whether the drawbacks mentioned are significant, or which kind of implementation would be good. The author of this PEP created an issue on bugs.python.org (now a GitHub issue[3]) suggesting options for improvement, but that focused on only a single problematic aspect of a few cases, and failed to gather any support.

A poll[4] was created on discuss.python.org to get a clearer sense of the community's opinions. After nearly two weeks, significant further discussion, and 39 votes, the poll's results were not conclusive. 40% had voted for "The status-quo is fine / there's no need for consistency in this", but most voters had voted for one or more standardized solutions. Specifically, 37% of the voters chose "Consistent use of a new, dedicated sentinel factory / class / meta-class, also made publicly available in the stdlib".

With such mixed opinions, this PEP was created to facilitate making a decision on the subject.

While working on this PEP, iterating on various options and implementations and continuing discussions, the author has come to the opinion that a simple, good implementation available in the standard library would be worth having, both for use in the standard library itself and elsewhere.

Rationale

The criteria guiding the chosen implementation were:

1. The sentinel objects should behave as expected of a sentinel object: When compared using the is operator, an object should always be considered identical to itself but never to any other object.
2. Creating a sentinel object should be a simple, straightforward one-liner.
3. It should be simple to define as many distinct sentinel values as needed.
4. The sentinel objects should have a clear and short repr.
5. It should be possible to use clear type signatures for sentinels.
6. The sentinel objects should behave correctly after copying and/or unpickling.
7. Such sentinels should work when using CPython 3.x and PyPy3, and ideally also with other implementations of Python.
8. As simple and straightforward as possible, in implementation and especially in use. Avoid this becoming one more special thing to learn when learning Python. It should be easy to find and use when needed, and obvious enough when reading code that one would normally not feel a need to look up its documentation.

With so many uses in the Python standard library[5], it would be useful to have an implementation in the standard library, since the stdlib cannot use implementations of sentinel objects available elsewhere (such as the sentinels[6] or sentinel[7] PyPI packages).

After researching existing idioms and implementations, and going through many different possible implementations, an implementation was written which meets all of these criteria (see Reference Implementation).

Specification

A new Sentinel class will be added to a new sentinels module. Its initializer will accept a single required argument, the name of the sentinel object, and three optional arguments: the repr of the object, its boolean value, and the name of its module:

    >>> from sentinels import Sentinel
    >>> NotGiven = Sentinel('NotGiven')
    >>> NotGiven
    <NotGiven>
    >>> MISSING = Sentinel('MISSING', repr='mymodule.MISSING')
    >>> MISSING
    mymodule.MISSING
    >>> MEGA = Sentinel('MEGA',
    ...                 repr='<MEGA>',
    ...                 bool_value=False,
    ...                 module_name='mymodule')
    >>> MEGA
    <MEGA>

Checking if a value is such a sentinel should be done using the is operator, as is recommended for None. Equality checks using == will also work as expected, returning True only when the object is compared with itself. Identity checks such as if value is MISSING: should usually be used rather than boolean checks such as if value: or if not value:.

Sentinel instances are truthy by default, unlike None. This parallels the default for arbitrary classes, as well as the boolean value of Ellipsis.
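For example, a function using such a sentinel to distinguish "no value given" from None could look like this sketch (assuming the proposed sentinels module; the function and names are illustrative only):

    from sentinels import Sentinel

    MISSING = Sentinel('MISSING')

    def get_setting(name, default=MISSING):
        settings = {'timeout': None}    # None is a meaningful value here
        if name in settings:
            return settings[name]
        if default is MISSING:          # identity check, as with None
            raise KeyError(name)
        return default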
The names of sentinels are unique within each module. When calling Sentinel() in a module where a sentinel with that name was already defined, the existing sentinel with that name will be returned. Sentinels with the same name in different modules will be distinct from each other.

Creating a copy of a sentinel object, such as by using copy.copy() or by pickling and unpickling, will return the same object.

Type annotations for sentinel values should use Literal[<sentinel_object>]. For example:

    def foo(value: int | Literal[MISSING] = MISSING) -> int:
        ...

The module_name optional argument should normally not need to be supplied, as Sentinel() will usually be able to recognize the module in which it was called. module_name should be supplied only in unusual cases when this automatic recognition does not work as intended, such as perhaps when using Jython or IronPython. This parallels the designs of Enum and namedtuple. For more details, see PEP 435.

The Sentinel class may not be sub-classed, to avoid overly-clever uses based on it, such as attempts to use it as a base for implementing singletons. It is considered important that the addition of Sentinel to the stdlib should add minimal complexity.

Ordering comparisons are undefined for sentinel objects.

Backwards Compatibility

While not breaking existing code, adding a new "sentinels" stdlib module could cause some confusion with regard to existing modules named "sentinels", and specifically with the "sentinels" package on PyPI.

The existing "sentinels" package on PyPI[8] appears to be abandoned, with the latest release being made on Aug. 2016. Therefore, using this name for a new stdlib module seems reasonable.

If and when this PEP is accepted, it may be worth verifying if this has indeed been abandoned, and if so asking to transfer ownership to the CPython maintainers to reduce the potential for confusion with the new stdlib module.

How to Teach This

The normal types of documentation of new stdlib modules and features, namely doc-strings, module docs and a section in "What's New", should suffice.

Security Implications

This proposal should have no security implications.

Reference Implementation

The reference implementation is found in a dedicated GitHub repo[9]. A simplified version follows:

    import sys

    _registry = {}

    class Sentinel:
        """Unique sentinel values."""

        def __new__(cls, name, repr=None, bool_value=True, module_name=None):
            name = str(name)
            repr = str(repr) if repr else f'<{name.split(".")[-1]}>'
            bool_value = bool(bool_value)
            if module_name is None:
                try:
                    module_name = \
                        sys._getframe(1).f_globals.get('__name__', '__main__')
                except (AttributeError, ValueError):
                    module_name = __name__

            registry_key = f'{module_name}-{name}'

            sentinel = _registry.get(registry_key, None)
            if sentinel is not None:
                return sentinel

            sentinel = super().__new__(cls)
            sentinel._name = name
            sentinel._repr = repr
            sentinel._bool_value = bool_value
            sentinel._module_name = module_name

            return _registry.setdefault(registry_key, sentinel)

        def __repr__(self):
            return self._repr

        def __bool__(self):
            return self._bool_value

        def __reduce__(self):
            # The arguments must match the __new__ signature (including
            # bool_value) so that unpickling finds the same registry entry.
            return (
                self.__class__,
                (
                    self._name,
                    self._repr,
                    self._bool_value,
                    self._module_name,
                ),
            )
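The registry lookup in __new__, together with __reduce__, is what makes copying and unpickling preserve identity; a quick check, assuming the class above:

    >>> import copy, pickle
    >>> NotGiven = Sentinel('NotGiven')
    >>> copy.deepcopy(NotGiven) is NotGiven
    True
    >>> pickle.loads(pickle.dumps(NotGiven)) is NotGiven
    True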
Rejected Ideas

Use NotGiven = object()

This suffers from all of the drawbacks mentioned in the Rationale section.

Add a single new sentinel value, such as MISSING or Sentinel

Since such a value could be used for various things in various places, one could not always be confident that it would never be a valid value in some use cases. On the other hand, a dedicated and distinct sentinel value can be used with confidence without needing to consider potential edge-cases.

Additionally, it is useful to be able to provide a meaningful name and repr for a sentinel value, specific to the context where it is used.

Finally, this was a very unpopular option in the poll[10], with only 12% of the votes voting for it.

Use the existing Ellipsis sentinel value

This is not the original intended use of Ellipsis, though it has become increasingly common to use it to define empty class or function blocks instead of using pass.

Also, similar to a potential new single sentinel value, Ellipsis can't be as confidently used in all cases, unlike a dedicated, distinct value.

Use a single-valued enum

The suggested idiom is:

    class NotGivenType(Enum):
        NotGiven = 'NotGiven'
    NotGiven = NotGivenType.NotGiven

Besides the excessive repetition, the repr is overly long: <NotGivenType.NotGiven: 'NotGiven'>. A shorter repr can be defined, at the expense of a bit more code and yet more repetition.

Finally, this option was the least popular among the nine options in the poll[11], being the only option to receive no votes.

A sentinel class decorator

The suggested idiom is:

    @sentinel(repr='<NotGiven>')
    class NotGivenType: pass
    NotGiven = NotGivenType()

While this allows for a very simple and clear implementation of the decorator, the idiom is too verbose, repetitive, and difficult to remember.

Using class objects

Since classes are inherently singletons, using a class as a sentinel value makes sense and allows for a simple implementation.

The simplest version of this is:

    class NotGiven: pass

To have a clear repr, one would need to use a meta-class:

    class NotGiven(metaclass=SentinelMeta): pass

... or a class decorator:

    @Sentinel
    class NotGiven: pass

Using classes this way is unusual and could be confusing. The intention of code would be hard to understand without comments. It would also cause such sentinels to have some unexpected and undesirable behavior, such as being callable.

Define a recommended "standard" idiom, without supplying an implementation

Most common existing idioms have significant drawbacks. So far, no idiom has been found that is clear and concise while avoiding these drawbacks.

Also, in the poll[12] on this subject, the options for recommending an idiom were unpopular, with the highest-voted option being voted for by only 25% of the voters.

Additional Notes

- This PEP and the initial implementation are drafted in a dedicated GitHub repo[13].

- For sentinels defined in a class scope, to avoid potential name clashes, one should use the fully-qualified name of the variable in the module. Only the part of the name after the last period will be used for the default repr. For example:

      >>> class MyClass:
      ...     NotGiven = Sentinel('MyClass.NotGiven')
      >>> MyClass.NotGiven
      <NotGiven>

- One should be careful when creating sentinels in a function or method, since sentinels with the same name created by code in the same module will be identical. If distinct sentinel objects are needed, make sure to use distinct names.

- There is no single desirable value for the "truthiness" of sentinels, i.e. their boolean value. It is sometimes useful for the boolean value to be True, and sometimes False. Of the built-in sentinels in Python, None evaluates to False, while Ellipsis (a.k.a. ...) evaluates to True. The desire for this to be set as needed came up in discussions as well.

- The boolean value of NotImplemented is True, but using this is deprecated since Python 3.9 (doing so generates a deprecation warning.) This deprecation is due to issues specific to NotImplemented, as described in bpo-35712[14].

- To define multiple, related sentinel values, possibly with a defined ordering among them, one should instead use Enum or something similar.

- There was a discussion on the typing-sig mailing list[15] about the typing for these sentinels, where different options were discussed.
Open Issues

- Is adding a new stdlib module the right way to go? I could not find any existing module which seems like a logical place for this. However, adding new stdlib modules should be done judiciously, so perhaps choosing an existing module would be preferable even if it is not a perfect fit?

Footnotes

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

[1] Python-Dev mailing list: The repr of a sentinel

[2] Python-Dev mailing list: "The stdlib contains tons of sentinels"

[3] bpo-44123: Make function parameter sentinel values true singletons

[4] discuss.python.org Poll: Sentinel Values in the Stdlib

[5] Python-Dev mailing list: "The stdlib contains tons of sentinels"

[6] The "sentinels" package on PyPI

[7] The "sentinel" package on PyPI

[8] sentinels package on PyPI

[9] Reference implementation at the taleinat/python-stdlib-sentinels GitHub repo

[10] discuss.python.org Poll: Sentinel Values in the Stdlib

[11] discuss.python.org Poll: Sentinel Values in the Stdlib

[12] discuss.python.org Poll: Sentinel Values in the Stdlib

[13] Reference implementation at the taleinat/python-stdlib-sentinels GitHub repo

[14] bpo-35712: Make NotImplemented unusable in boolean context

[15] Discussion thread about type signatures for these sentinels on the typing-sig mailing list

PEP: 710
Title: Recording the provenance of installed packages
Author: Fridolín Pokorný
Sponsor: Donald Stufft
PEP-Delegate: Paul Moore
Discussions-To: https://discuss.python.org/t/pep-710-recording-the-provenance-of-installed-packages/25428
Status: Draft
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 27-Mar-2023
Post-History: 03-Dec-2021, 30-Jan-2023, 14-Mar-2023, 03-Apr-2023

Abstract

This PEP describes a way to record the provenance of installed Python distributions. The record is created by an installer and is available to users in the form of a JSON file provenance_url.json in the .dist-info directory. The mentioned JSON file captures additional metadata to allow recording a URL to a distribution package together with the installed distribution hash. This proposal is built on top of PEP 610 and its corresponding canonical PyPA specification, and complements direct_url.json with provenance_url.json for when packages are identified by a name, and optionally a version.

Motivation

Installing a Python Project involves downloading a Distribution Package from a Package Index and extracting its content to an appropriate place. After the installation process is done, information about the release artifact used as well as its source is generally lost. However, there are use cases for keeping records of distributions used for installing packages, and of their provenance.

Python wheels can be built with different compiler flags or with support for different wheel tags.
In both cases, users might get into a situation in which multiple wheels might be considered by installers (possibly from different package indexes), and immediately finding out which wheel file was actually used during the installation might be helpful. This way, developers can use information about wheels to debug issues and make sure the desired wheel was actually installed. Another use case could be tools reporting software installed, such as tools reporting an SBOM (Software Bill of Materials), that might give more accurate reports. Yet another use case could be reconstruction of the Python environment by pinning each installed package to a specific distribution artifact consumed from a Python package index.

Rationale

The motivation described in this PEP is an extension of the Recording the Direct URL Origin of installed distributions specification. In addition to recording provenance information for packages installed using a direct URL, installers should also do so for packages installed by name (and optionally version) from Python package indexes.

The idea described in this PEP originated in a tool called micropipenv that is used to install distribution packages in containerized environments (see the reported issue thoth-station/micropipenv#206). Currently, the assembled containerized application does not implicitly carry information about the provenance of installed distribution packages (unless these are installed from full URLs and recorded via direct_url.json). This requires container image suppliers to link container images with the corresponding build process, its configuration and the application source code for checking requirements files in cases when software present in containerized environments needs to be audited.

The subsequent discussion in the Discourse thread also brought up pip's new --report option that can generate a detailed JSON report about the installation process. This option could help with the provenance problem this PEP approaches. Nevertheless, this option needs to be explicitly passed to pip to obtain the provenance information, and includes additional metadata that might not be necessary for checking the provenance (such as Python version requirements of each distribution package). Also, this option is specific to pip as of the writing of this PEP.

Note that the current spec for recording installed packages defines a RECORD file that records installed files, but not the distribution artifact from which these files were obtained. Auditing installed artifacts can be performed based on matching the entries listed in the RECORD file. However, this technique requires a pre-computed database of files each artifact provides, or a comparison with the actual artifact content. Both approaches are relatively expensive and time consuming operations, which could be eliminated with the proposed provenance_url.json file.

Recording provenance information for installed distribution packages, both those obtained from direct URLs and by name/version from an index, can simplify auditing Python environments in general, beyond just the specific use case for containerized applications mentioned earlier.
A community project, pip-audit, raised its possible interest in pypa/pip-audit#170.

Specification

The keywords "MUST", "MUST NOT", "REQUIRED", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.

The provenance_url.json file SHOULD be created in the .dist-info directory by installers when installing a Distribution Package specified by name (and optionally by Version Specifier).

This file MUST NOT be created when installing a distribution package from a requirement specifying a direct URL reference (including a VCS URL).

Only one of the files provenance_url.json and direct_url.json (from the Recording the Direct URL Origin of installed distributions specification and the corresponding specification of the Direct URL Data Structure) may be present in a given .dist-info directory; installers MUST NOT add both.

The provenance_url.json JSON file MUST be a dictionary, compliant with RFC 8259 and UTF-8 encoded.

If present, it MUST contain exactly two keys. The first MUST be url, with type string. The second key MUST be archive_info with a value defined below.

The value of the url key MUST be the URL from which the distribution package was downloaded. If a wheel is built from a source distribution, the url value MUST be the URL from which the source distribution was downloaded. If a wheel is downloaded and installed directly, the url field MUST be the URL from which the wheel was downloaded. As in the Direct URL Data Structure specification, the url value MUST be stripped of any sensitive authentication information for security reasons.

The user:password section of the URL MAY however be composed of environment variables, matching the following regular expression:

    \$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?

Additionally, the user:password section of the URL MAY be a well-known, non-security sensitive string. A typical example is git in the case of a URL such as ssh://git@gitlab.com.

The value of archive_info MUST be a dictionary with a single key hashes. The value of hashes is a dictionary mapping hash function names to a hex-encoded digest of the file referenced by the url value. At least one hash MUST be recorded. Multiple hashes MAY be included, and it is up to the consumer to decide what to do with multiple hashes (it may validate all of them or a subset of them, or nothing at all).

Each hash MUST be one of the single argument hashes provided by hashlib.algorithms_guaranteed, excluding sha1 and md5 which MUST NOT be used. As of Python 3.11, with shake_128 and shake_256 excluded for being multi-argument, the allowed set of hashes is:

    >>> import hashlib
    >>> sorted(hashlib.algorithms_guaranteed - {"shake_128", "shake_256", "sha1", "md5"})
    ['blake2b', 'blake2s', 'sha224', 'sha256', 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 'sha512']

Each hash MUST be referenced by the canonical name of the hash, always lower case.

Hashes sha1 and md5 MUST NOT be present, due to the security limitations of these hash algorithms. Conversely, hash sha256 SHOULD be included.
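A non-normative sketch of how a consumer could validate a provenance_url.json file against these rules (check_provenance and the error messages are illustrative only):

    import hashlib
    import json

    ALLOWED_HASHES = hashlib.algorithms_guaranteed - {
        "shake_128", "shake_256", "sha1", "md5",
    }

    def check_provenance(provenance_file, artifact_path):
        with open(provenance_file, "rb") as f:
            doc = json.load(f)
        hashes = doc["archive_info"]["hashes"]
        if not hashes:
            raise ValueError("at least one hash MUST be recorded")
        with open(artifact_path, "rb") as f:
            data = f.read()
        for name, expected in hashes.items():
            if name not in ALLOWED_HASHES:
                raise ValueError(f"disallowed hash name: {name}")
            if hashlib.new(name, data).hexdigest() != expected:
                raise ValueError(f"hash mismatch for {name}")
        return doc["url"]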
Installers that cache distribution packages from an index SHOULD keep information related to the cached distribution artifact, so that the provenance_url.json file can be created even when installing distribution packages from the installer's cache.

Backwards Compatibility

Following the spec for recording installed packages, installers may keep additional installer-specific files in the .dist-info directory. To make sure this PEP does not cause any backwards compatibility issues, a comprehensive survey of installers and libraries found no current tools that are using a similarly-named file, or other major feasibility concerns.

The Wheel specification lists files that can be present in the .dist-info directory. None of these file names collide with the proposed provenance_url.json file from this PEP.

Presence of provenance_url.json in installers and libraries

A comprehensive survey of the existing installers, libraries, and dependency managers in the Python ecosystem analyzed the implications of adding support for provenance_url.json to each tool. In summary, no major backwards compatibility issues, conflicts or feasibility blockers were found as of the time of writing of this PEP. More details about the survey can be found in the Appendix: Survey of installers and libraries section.

Compatibility with direct_url.json

This proposal does not make any changes to the direct_url.json file described in PEP 610 and its corresponding canonical PyPA specification.

The content of the provenance_url.json file was designed in a way to eventually allow installers to reuse some of the logic supporting direct_url.json when a direct URL refers to a source archive or a wheel.

The main difference between the provenance_url.json and direct_url.json files is the mandatory keys and their values in the provenance_url.json file. This helps make sure consumers of the provenance_url.json file can rely on its content, if the file is present in the .dist-info directory.

Security Implications

One of the main security features of the provenance_url.json file is the ability to audit installed artifacts in Python environments. Tools can check which Python package indexes were used to install Python distribution packages, as well as the hash digests of their release artifacts.

As an example, we can take the recent compromised dependency chain in the PyTorch incident. The PyTorch index provided a package named torchtriton. An attacker published torchtriton on PyPI, which ran a malicious binary. By checking the URL of the installed Python distribution stated in the provenance_url.json file, tools can automatically check the source of the installed Python distribution. In the case of the PyTorch incident, the URL of torchtriton should point to the PyTorch index, not PyPI. Tools can thus help identify such maliciously installed Python distributions by checking the recorded URL. A more exact check can also include the hash of the installed Python distribution stated in the provenance_url.json file. Such checks on hashes can be helpful for mirrored Python package indexes where Python distributions are not distinguishable by their source URLs, making sure only desired Python package distributions are installed.
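For instance, an auditing tool could scan an environment and flag distributions whose recorded URL does not come from an expected index. A rough sketch using importlib.metadata (the TRUSTED_PREFIXES allow-list is illustrative only):

    import json
    from importlib.metadata import distributions

    TRUSTED_PREFIXES = (
        "https://files.pythonhosted.org/",
        "https://download.pytorch.org/",
    )

    for dist in distributions():
        text = dist.read_text("provenance_url.json")
        if text is None:
            continue    # direct URL install, or installer predating this PEP
        url = json.loads(text)["url"]
        if not url.startswith(TRUSTED_PREFIXES):
            print(f"suspicious origin for {dist.metadata['Name']}: {url}")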
A malicious actor can intentionally adjust the content of provenance_url.json to possibly hide provenance information of the installed Python distribution. A security check which would uncover such malicious activity is beyond the scope of this PEP, as it would require monitoring actions on the filesystem and eventually reviewing user or file permissions.

How to Teach This

The provenance_url.json metadata file is intended for tools and is not directly visible to end users.

Examples

Examples of a valid provenance_url.json

A valid provenance_url.json listing multiple hashes:

    {
      "archive_info": {
        "hashes": {
          "blake2s": "fffeaf3d0bd71dc960ca2113af890a2f2198f2466f8cd58ce4b77c1fc54601ff",
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f",
          "sha3_256": "c856930e0f707266d30e5b48c667a843d45e79bb30473c464e92dfa158285eab",
          "sha512": "6bad5536c30a0b2d5905318a1592948929fbac9baf3bcf2e7faeaf90f445f82bc2b656d0a89070d8a6a9395761f4793c83187bd640c64b2656a112b5be41f73d"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

A valid provenance_url.json listing a single hash entry:

    {
      "archive_info": {
        "hashes": {
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

A valid provenance_url.json listing a source distribution which was used to build and install a wheel:

    {
      "archive_info": {
        "hashes": {
          "sha256": "8bfe29f17c10e2f2e619de8033a07a224058d96b3bfe2ed61777596f7ffd7fa9"
        }
      },
      "url": "https://files.pythonhosted.org/packages/1d/43/ad8ae671de795ec2eafd86515ef9842ab68455009d864c058d0c3dcf680d/micropipenv-0.0.1.tar.gz"
    }

Examples of an invalid provenance_url.json

The following example includes a hash key in the archive_info dictionary as originally designed in the data structure documented in the Direct URL Data Structure specification. The hash key MUST NOT be present, to prevent any possible confusion with hashes and the additional checks that would be required to keep hash values in sync.

    {
      "archive_info": {
        "hash": "sha256=236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f",
        "hashes": {
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

Another example demonstrates an invalid hash name. The referenced hash name does not correspond to the canonical hash names described in this PEP and in the Python documentation under hashlib's hash.name attribute.

    {
      "archive_info": {
        "hashes": {
          "SHA-256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

The last example demonstrates a provenance_url.json file with no hashes available for the downloaded artifact:

    {
      "archive_info": {
        "hashes": {}
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

Example pip commands and their effect on provenance_url.json and direct_url.json
These commands generate a direct_url.json file but do not generate a provenance_url.json file. These examples follow the examples from the Direct URL Data Structure specification:

- pip install https://example.com/app-1.0.tgz
- pip install https://example.com/app-1.0.whl
- pip install "git+https://example.com/repo/app.git#egg=app&subdirectory=setup"
- pip install ./app
- pip install file:///home/user/app
- pip install --editable "git+https://example.com/repo/app.git#egg=app&subdirectory=setup"
  (in which case, url will be the local directory where the git repository has been cloned to, and dir_info will be present with "editable": true and no vcs_info will be set)
- pip install -e ./app

Commands that generate a provenance_url.json file but do not generate a direct_url.json file:

- pip install app
- pip install app~=2.2.0
- pip install app --no-index --find-links "https://example.com/"

This behaviour can be tested using changes to pip implemented in the PR pypa/pip#11865.

Reference Implementation

A proof-of-concept for creating the provenance_url.json metadata file when installing a Python Distribution Package is available in the PR to pip, pypa/pip#11865. It reuses the already available implementation for the direct URL data structure to provide the provenance_url.json metadata file for cases when direct_url.json is not created.

A reference implementation for supporting the provenance_url.json file in PDM is available in pdm-project/pdm#3013.

A prototype called pip-preserve was developed to demonstrate the creation of requirements.txt files considering the direct_url.json and provenance_url.json metadata files. This tool mimics the pip freeze functionality, but the listing of installed packages also includes the hashes of the Python distribution artifacts.

To further support this proposal, pip-sbom demonstrates the creation of an SBOM in the SPDX format. The tool uses information stored in the provenance_url.json file.

Rejected Ideas

Naming the file direct_url.json instead of provenance_url.json

To preserve backwards compatibility with the Recording the Direct URL Origin of installed distributions specification, the file cannot be named direct_url.json, as per the text of that specification:

    This file MUST NOT be created when installing a distribution from an
    other type of requirement (i.e. name plus version specifier).

Such a change might introduce backwards compatibility issues for consumers of direct_url.json who rely on its presence only when distributions are installed using a direct URL reference.

Deprecating direct_url.json and using only provenance_url.json

File direct_url.json is already well established by the Direct URL Data Structure specification and is already used by installers. For example, pip uses direct_url.json to report a direct URL reference on pip freeze. Deprecating direct_url.json would require additional changes to the pip freeze implementation in pip (see PR fridex/pip#2) and could introduce backwards compatibility issues for already existing direct_url.json consumers.

Keeping the hash key in the archive_info dictionary

The Direct URL Data Structure specification discusses the possibility of including the hash key alongside the hashes key in the archive_info dictionary. This PEP explicitly does not include the hash key in the provenance_url.json file and allows only the hashes key to be present.
By doing so, we eliminate possible redundancy in the file, possible confusion, and any additional checks that would need to be done to make sure the hashes are in sync.

Allowing no hashes stated

For cases when a wheel file is installed from pip's cache and built using an older version of pip, pip does not record hashes of the downloaded source distributions. As we do not have hashes of these downloaded source distributions, the hashes key in the provenance_url.json file would not contain any entries. In such cases, pip does not create any provenance_url.json file, as the provenance information is not complete. Consumers are encouraged to rebuild wheels with a newer version of pip in these cases.

Making the hashes key optional

PEP 610 and its corresponding canonical PyPA specification recommend including the hashes key of the archive_info in the direct_url.json file, but it is not required (per the RFC 2119 language):

    A hashes key SHOULD be present as a dictionary mapping a hash name to
    a hex encoded digest of the file.

This PEP requires the hashes key be included in archive_info in the provenance_url.json file if that file is created; per this PEP:

    The value of archive_info MUST be a dictionary with a single key
    hashes.

By doing so, consumers of provenance_url.json can check artifact digests when the provenance_url.json file is created by installers.

Storing index URL

A possibility was raised for storing the index URL as part of the file content. This index URL would represent the index configured in pip's configuration or specified using the --index-url or --extra-index-url options. Storing this information was considered confusing, especially when using other installation options like --find-links. Since the actual index URL is not strictly bound to the location from which the wheel file was downloaded, we decided not to store the index URL in the provenance_url.json file.

Open Issues

Availability of the provenance_url.json file in Conda

We would like to get feedback on the provenance_url.json file from the Conda maintainers. It is not clear whether Conda would like to adopt the provenance_url.json file. Conda already stores provenance-related information (similar to the provenance information proposed in this PEP) in JSON files located in the conda-meta directory, following its actions during installation.

Using provenance_url.json in downstream installers

The proposed provenance_url.json file was meant to be adopted primarily by Python installers. Other installers, such as APT or DNF, might record the provenance of the installed downstream Python distributions in their own way specific to downstream package management. The proposed file is not expected to be created by these downstream package installers and thus they were intentionally left out of this PEP. However, any input by developers or maintainers of these installers is valuable to possibly enrich the provenance_url.json file with information that would help in some way.

Appendix: Survey of installers and libraries

pip

The function from pip's internal API responsible for installing wheels, named _install_wheel, does not store any provenance_url.json file in the .dist-info directory.
Additionally, a prototype introducing the mentioned file to pip in pypa/pip#11865 demonstrates incorporating logic for handling the provenance_url.json file in pip's source code.

As pip is used by some of the tools mentioned below to install Python package distributions, findings for pip apply to those tools as well; pip does not allow parametrizing the creation of files in the .dist-info directory through its internal API. Most of the tools mentioned below that use pip invoke it as a subprocess, which has no effect on the eventual presence of the provenance_url.json file in the .dist-info directory.

distlib

distlib implements low-level functionality to manipulate the dist-info directory. The database of installed distributions does not use any file named provenance_url.json, based on distlib's source code.

Pipenv

Pipenv uses pip to install Python package distributions. No additional logic was identified that would cause backwards compatibility issues when introducing the provenance_url.json file in the .dist-info directory.

installer

installer does not create a provenance_url.json file explicitly. Nevertheless, as per the Recording Installed Projects specification, installer allows passing the additional_metadata argument to create a file in the .dist-info directory -- see the source code. To avoid any backwards compatibility issues, any library or tool using installer must not request creating the provenance_url.json file using the mentioned additional_metadata argument.

Poetry

The installation logic in Poetry depends on the installer.modern-installer configuration option (see docs).

For cases when the installer.modern-installer configuration option is set to false, Poetry uses pip for installing Python package distributions.

On the other hand, when the installer.modern-installer configuration option is set to true, Poetry uses installer to install Python package distributions. As can be seen from the linked sources, no additional metadata file named provenance_url.json is passed, so there are no compatibility issues with this PEP.

Conda

Conda does not create any provenance_url.json file when Python package distributions are installed.

Hatch

Hatch uses pip to install project dependencies.

micropipenv

As micropipenv is a wrapper on top of pip, it uses pip to install Python distributions, for both lock files as well as for requirements files.

Thamos

Thamos uses micropipenv to install Python package distributions, hence any findings for micropipenv apply to Thamos.

PDM

PDM uses installer to install binary distributions. The only additional metadata file it eventually creates in the .dist-info directory is the REFER_TO file.

uv

uv is written in Rust and uses its own installation logic when installing wheels. It does not create any additional files in the .dist-info directory that would collide with the provenance_url.json file naming.

Acknowledgements

Thanks to Dustin Ingram, Brett Cannon, and Paul Moore for the initial discussion in which this idea originated.

Thanks to Donald Stufft, Ofek Lev, and Trishank Kuppusamy for early feedback and support to work on this PEP.
Thanks to Gregory P. Smith, Stéphane Bidoul, and C.A.M. Gerlach for reviewing this PEP and providing valuable suggestions.

Thanks to Seth Michael Larson for providing valuable suggestions and for the proposed pip-sbom prototype.

Thanks to Stéphane Bidoul and Chris Jerdonek for PEP 610, and the related Recording the Direct URL Origin of installed distributions and Direct URL Data Structure specifications.

Thanks to Frost Ming for raising a possible concern around storing the index URL in the provenance_url.json file, and for initial PEP 710 support in PDM.

Last, but not least, thanks to Donald Stufft for sponsoring this PEP.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

PEP: 604
Title: Allow writing union types as X | Y
Author: Philippe PRADOS, Maggie Moss
Sponsor: Chris Angelico
BDFL-Delegate: Guido van Rossum
Discussions-To: typing-sig@python.org
Status: Final
Type: Standards Track
Topic: Typing
Created: 28-Aug-2019
Python-Version: 3.10
Post-History: 28-Aug-2019, 05-Aug-2020

Canonical documentation: types.UnionType.

Abstract

This PEP proposes overloading the | operator on types to allow writing Union[X, Y] as X | Y, and allows it to appear in isinstance and issubclass calls.

Motivation

PEP 484 and PEP 526 propose a generic syntax to add typing to variables, parameters and function returns. PEP 585 proposes to expose parameters to generics at runtime. Mypy [1] accepts a syntax which looks like:

    annotation: name_type
    name_type: NAME (args)?
    args: '[' paramslist ']'
    paramslist: annotation (',' annotation)* [',']

- To describe a disjunction (union type), the user must use Union[X, Y].

The verbosity of this syntax does not help with type adoption.

Proposal

Inspired by Scala [2] and Pike [3], this proposal adds operator type.__or__(). With this new operator, it is possible to write int | str instead of Union[int, str].
In addition to annotations, the result of this expression would then be valid in isinstance() and issubclass():

    isinstance(5, int | str)
    issubclass(bool, int | float)

We will also be able to write t | None or None | t instead of Optional[t]:

    isinstance(None, int | None)
    isinstance(42, None | int)

Specification

The new union syntax should be accepted for function, variable and parameter annotations.

Simplified Syntax

    # Instead of
    # def f(list: List[Union[int, str]], param: Optional[int]) -> Union[float, str]
    def f(list: List[int | str], param: int | None) -> float | str:
        pass

    f([1, "abc"], None)

    # Instead of typing.List[typing.Union[str, int]]
    typing.List[str | int]
    list[str | int]

    # Instead of typing.Dict[str, typing.Union[int, float]]
    typing.Dict[str, int | float]
    dict[str, int | float]

The existing typing.Union and | syntax should be equivalent.

    int | str == typing.Union[int, str]

    typing.Union[int, int] == int
    int | int == int

The order of the items in the Union should not matter for equality.

    (int | str) == (str | int)
    (int | str | float) == typing.Union[str, float, int]

Optional values should be equivalent to the new union syntax:

    None | t == typing.Optional[t]

A new Union.__repr__() method should be implemented.

    str(int | list[str])
    # int | list[str]

    str(int | int)
    # int

isinstance and issubclass

The new syntax should be accepted for calls to isinstance and issubclass as long as the Union items are valid arguments to isinstance and issubclass themselves.

    # valid
    isinstance("", int | str)

    # invalid
    isinstance(2, list[int])  # TypeError: isinstance() argument 2 cannot be a parameterized generic
    isinstance(1, int | list[int])

    # valid
    issubclass(bool, int | float)

    # invalid
    issubclass(bool, bool | list[int])

Incompatible changes

In some situations, some exceptions will not be raised as expected.

If a metaclass implements the __or__ operator, it will override this:

    >>> class M(type):
    ...     def __or__(self, other): return "Hello"
    ...
    >>> class C(metaclass=M): pass
    ...
    >>> C | int
    'Hello'
    >>> int | C
    typing.Union[int, __main__.C]
    >>> Union[C, int]
    typing.Union[__main__.C, int]

Objections and responses

For more details about discussions, see the links below:

- Discussion in python-ideas
- Discussion in typing-sig

1. Add a new operator for Union[type1, type2]?

PROS:

- This syntax can be more readable, and is similar to other languages (Scala, ...)
- At runtime, int|str might return a simple object in 3.10, rather than everything that you'd need to grab from importing typing (illustrated below)

CONS:

- Adding this operator introduces a dependency between typing and builtins
- Breaks the backport (in that typing can easily be backported but core types can't)
- If Python itself doesn't have to be changed, we'd still need to implement it in mypy, Pyre, PyCharm, Pytype, and who knows what else (it's a minor change; see "Reference Implementation")
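For reference, the runtime object mentioned in the first PRO did materialize in Python 3.10 as types.UnionType; a brief illustration (outputs shown as comments):

    import types

    u = int | str
    print(type(u))             # <class 'types.UnionType'>
    print(u.__args__)          # (<class 'int'>, <class 'str'>)
    print(isinstance("x", u))  # True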
2. Change only PEP 484 (Type hints) to accept the syntax type1 | type2?

PEP 563 (Postponed Evaluation of Annotations) is enough to accept this proposition, if we accept not being compatible with the dynamic evaluation of annotations (eval()).

    >>> from __future__ import annotations
    >>> def foo() -> int | str: pass
    ...
    >>> eval(foo.__annotations__['return'])
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "<string>", line 1, in <module>
    TypeError: unsupported operand type(s) for |: 'type' and 'type'

3. Extend isinstance() and issubclass() to accept Union?

    isinstance(x, str | int) ==> "is x an instance of str or int"

PROS:

- If they were permitted, then instance checking could use an extremely clean-looking notation

CONS:

- Must migrate all of the typing module into builtins

Reference Implementation

A new built-in Union type must be implemented to hold the return value of t1 | t2, and it must be supported by isinstance() and issubclass(). This type can be placed in the types module. Interoperability between types.Union and typing.Union must be provided.

Once the Python language is extended, mypy [4] and other type checkers will need to be updated to accept this new syntax.

- A proposed implementation for cpython is here.
- A proposed implementation for mypy is here.

References

[1] mypy: http://mypy-lang.org/

[2] Scala Union Types: https://dotty.epfl.ch/docs/reference/new-types/union-types.html

[3] Pike: http://pike.lysator.liu.se/docs/man/chapter_3.html#3.5

[4] mypy: http://mypy-lang.org/

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

PEP: 292
Title: Simpler String Substitutions
Author: Barry Warsaw
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Jun-2002
Python-Version: 2.4
Post-History: 18-Jun-2002, 23-Mar-2004, 22-Aug-2004
Replaces: 215

Abstract

This PEP describes a simpler string substitution feature, also known as string interpolation. This PEP is "simpler" in two respects:

1. Python's current string substitution feature (i.e. %-substitution) is complicated and error prone. This PEP is simpler at the cost of some expressiveness.
2. PEP 215 proposed an alternative string interpolation feature, introducing a new $ string prefix. PEP 292 is simpler than this because it involves no syntax changes and has much simpler rules for what substitutions can occur in the string.

Rationale

Python currently supports a string substitution syntax based on C's printf() '%' formatting character [1]. While quite rich, %-formatting codes are also error prone, even for experienced Python programmers. A common mistake is to leave off the trailing format character, e.g. the 's' in "%(name)s".
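For example, the mistake fails only at runtime, when the substitution is actually performed:

    >>> d = {'name': 'Guido'}
    >>> '%(name)s' % d
    'Guido'
    >>> '%(name)' % d
    Traceback (most recent call last):
    [...]
    ValueError: incomplete format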
In addition, the rules for what can follow a % sign are fairly complex, while the usual application rarely needs such complexity. Most scripts need to do some string interpolation, but most of those use simple 'stringification' formats, i.e. %s or %(name)s. This form should be made simpler and less error prone.

A Simpler Proposal

We propose the addition of a new class, called Template, which will live in the string module. The Template class supports new rules for string substitution; its value contains placeholders, introduced with the $ character. The following rules for $-placeholders apply:

1. $$ is an escape; it is replaced with a single $
2. $identifier names a substitution placeholder matching a mapping key of "identifier". By default, "identifier" must spell a Python identifier as defined in [2]. The first non-identifier character after the $ character terminates this placeholder specification.
3. ${identifier} is equivalent to $identifier. It is required when valid identifier characters follow the placeholder but are not part of the placeholder, e.g. "${noun}ification".

If the $ character appears at the end of the line, or is followed by any character other than those described above, a ValueError will be raised at interpolation time. Values in the mapping are automatically converted to strings.

No other characters have special meaning; however, it is possible to derive from the Template class to define different substitution rules. For example, a derived class could allow for periods in the placeholder (e.g. to support a kind of dynamic namespace and attribute path lookup), or could define a delimiter character other than $.
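As a minimal sketch of the latter idea (the subclass name is illustrative; the delimiter class attribute shown is the hook provided by the eventual standard library implementation):

    from string import Template

    class PercentTemplate(Template):
        # Use '%' instead of '$' as the placeholder delimiter.
        delimiter = '%'

    s = PercentTemplate('%name was born in %country')
    print(s.substitute(name='Guido', country='the Netherlands'))
    # Guido was born in the Netherlands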
Once the Template has been created, substitutions can be performed by calling one of two methods:

- substitute(). This method returns a new string which results when the values of a mapping are substituted for the placeholders in the Template. If there are placeholders which are not present in the mapping, a KeyError will be raised.

- safe_substitute(). This is similar to the substitute() method, except that KeyErrors are never raised (due to placeholders missing from the mapping). When a placeholder is missing, the original placeholder will appear in the resulting string.

Here are some examples:

    >>> from string import Template
    >>> s = Template('${name} was born in ${country}')
    >>> print s.substitute(name='Guido', country='the Netherlands')
    Guido was born in the Netherlands
    >>> print s.substitute(name='Guido')
    Traceback (most recent call last):
    [...]
    KeyError: 'country'
    >>> print s.safe_substitute(name='Guido')
    Guido was born in ${country}

The signature of substitute() and safe_substitute() allows for passing the mapping of placeholders to values, either as a single dictionary-like object in the first positional argument, or as keyword arguments as shown above. The exact details and signatures of these two methods are reserved for the standard library documentation.

Why $ and Braces?

The BDFL said it best [3]: "The $ means "substitution" in so many languages besides Perl that I wonder where you've been. [...] We're copying this from the shell."

Thus the substitution rules are chosen because of the similarity with so many other languages. This makes the substitution rules easier to teach, learn, and remember.

Comparison to PEP 215

PEP 215 describes an alternate proposal for string interpolation. Unlike that PEP, this one does not propose any new syntax for Python. All the proposed new features are embodied in a new library module. PEP 215 proposes a new string prefix representation such as $"" which signals to Python that a new type of string is present. $-strings would have to interact with the existing r-prefixes and u-prefixes, essentially doubling the number of string prefix combinations.

PEP 215 also allows for arbitrary Python expressions inside the $-strings, so that you could do things like:

    import sys
    print $"sys = $sys, sys = $sys.modules['sys']"

which would return:

    sys = <module 'sys' (built-in)>, sys = <module 'sys' (built-in)>

It's generally accepted that the rules in PEP 215 are safe in the sense that they introduce no new security issues (see PEP 215, "Security Issues" for details). However, the rules are still quite complex, and make it more difficult to see the substitution placeholder in the original $-string.

The interesting thing is that the Template class defined in this PEP is designed for inheritance and, with a little extra work, it's possible to support PEP 215's functionality using existing Python syntax.

For example, one could define subclasses of Template and dict that allowed for a more complex placeholder syntax and a mapping that evaluated those placeholders.
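A rough sketch of that idea, assuming the idpattern hook from the eventual standard library implementation (the class names are illustrative, and evaluating placeholders has the same security caveats as PEP 215's $-strings):

    import sys
    from string import Template

    class ExprTemplate(Template):
        # Allow dots and subscripts in placeholders, e.g. ${sys.path[0]}.
        idpattern = r"[_a-z][_a-z0-9.\[\]'\"]*"

    class EvalMapping(dict):
        # Evaluate each placeholder as a Python expression.
        def __getitem__(self, key):
            return eval(key, {'sys': sys})

    t = ExprTemplate('sys = ${sys}, first path entry = ${sys.path[0]}')
    print(t.substitute(EvalMapping()))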
Internationalization

The implementation supports internationalization by recording the original template string in the Template instance's template attribute. This attribute would serve as the lookup key in a gettext-based catalog. It is up to the application to turn the resulting string back into a Template for substitution.

However, the Template class was designed to work more intuitively in an internationalized application, by supporting the mixing-in of Template and unicode subclasses. Thus an internationalized application could create an application-specific subclass, multiply inheriting from Template and unicode, and using instances of that subclass as the gettext catalog key. Further, the subclass could alias the special __mod__() method to either .substitute() or .safe_substitute() to provide a more traditional string/unicode-like %-operator substitution syntax.

Reference Implementation

The implementation [4] has been committed to the Python 2.4 source tree.

References

[1] String Formatting Operations: https://docs.python.org/release/2.6/library/stdtypes.html#string-formatting-operations

[2] Identifiers and Keywords: https://docs.python.org/release/2.6/reference/lexical_analysis.html#identifiers-and-keywords

[3] https://mail.python.org/pipermail/python-dev/2002-June/025652.html

[4] Reference Implementation: http://sourceforge.net/tracker/index.php?func=detail&aid=1014055&group_id=5470&atid=305470

Copyright

This document has been placed in the public domain.

PEP: 756
Title: Add PyUnicode_Export() and PyUnicode_Import() C functions
Author: Victor Stinner
PEP-Delegate: C API Working Group
Discussions-To: https://discuss.python.org/t/63891
Status: Draft
Type: Standards Track
Created: 13-Sep-2024
Python-Version: 3.14
Post-History: 14-Sep-2024

Abstract

Add functions to the limited C API version 3.14:

- PyUnicode_Export(): export a Python str object as a Py_buffer view.
- PyUnicode_Import(): import a Python str object.

On CPython, PyUnicode_Export() has an O(1) complexity: no memory is copied and no conversion is done.

Rationale

PEP 393

PEP 393 "Flexible String Representation" changed string internals in Python 3.3 to use three formats:

- PyUnicode_1BYTE_KIND: Unicode range [U+0000; U+00ff], UCS-1, 1 byte/character.
- PyUnicode_2BYTE_KIND: Unicode range [U+0000; U+ffff], UCS-2, 2 bytes/character.
- PyUnicode_4BYTE_KIND: Unicode range [U+0000; U+10ffff], UCS-4, 4 bytes/character.

A Python str object must always use the most compact format. For example, a string which only contains ASCII characters must use the UCS-1 format.

The PyUnicode_KIND() function can be used to know the format used by a string.

One of the following functions can be used to access the data:

- PyUnicode_1BYTE_DATA() for PyUnicode_1BYTE_KIND.
- PyUnicode_2BYTE_DATA() for PyUnicode_2BYTE_KIND.
- PyUnicode_4BYTE_DATA() for PyUnicode_4BYTE_KIND.

To get the best performance, a C extension should have 3 code paths, one for each of these 3 native string formats.

Limited C API

PEP 393 functions such as PyUnicode_KIND() and PyUnicode_1BYTE_DATA() are excluded from the limited C API. It's not possible to write code specialized for UCS formats.
A C extension using the limited C API can only use less efficient code paths and string formats. For example, the MarkupSafe project has a C extension specialized for UCS formats for best performance, and so cannot use the limited C API.

Specification

API

Add the following API to the limited C API version 3.14:

    int32_t PyUnicode_Export(
        PyObject *unicode,
        int32_t requested_formats,
        Py_buffer *view);
    PyObject* PyUnicode_Import(
        const void *data,
        Py_ssize_t nbytes,
        int32_t format);

    #define PyUnicode_FORMAT_UCS1  0x01   // Py_UCS1*
    #define PyUnicode_FORMAT_UCS2  0x02   // Py_UCS2*
    #define PyUnicode_FORMAT_UCS4  0x04   // Py_UCS4*
    #define PyUnicode_FORMAT_UTF8  0x08   // char*
    #define PyUnicode_FORMAT_ASCII 0x10   // char* (ASCII string)

The int32_t type is used instead of int to have a well-defined type size and not depend on the platform or the compiler. See Avoid C-specific Types for the longer rationale.

PyUnicode_Export()

API:

    int32_t PyUnicode_Export(
        PyObject *unicode,
        int32_t requested_formats,
        Py_buffer *view)

Export the contents of the unicode string in one of the requested_formats.

- On success, fill view, and return a format (greater than 0).
- On error, set an exception, and return -1. view is left unchanged.

After a successful call to PyUnicode_Export(), the view buffer must be released by PyBuffer_Release(). The contents of the buffer are valid until they are released.

The buffer is read-only and must not be modified.

The view->len member must be used to get the string length. The buffer should end with a trailing NUL character, but it's not recommended to rely on that because of embedded NUL characters.

unicode and view must not be NULL.

Available formats:

    Constant Identifier       Value   Description
    ----------------------    -----   ------------------------
    PyUnicode_FORMAT_UCS1     0x01    UCS-1 string (Py_UCS1*)
    PyUnicode_FORMAT_UCS2     0x02    UCS-2 string (Py_UCS2*)
    PyUnicode_FORMAT_UCS4     0x04    UCS-4 string (Py_UCS4*)
    PyUnicode_FORMAT_UTF8     0x08    UTF-8 string (char*)
    PyUnicode_FORMAT_ASCII    0x10    ASCII string (Py_UCS1*)

UCS-2 and UCS-4 use the native byte order.

requested_formats can be a single format or a bitwise combination of the formats in the table above. On success, the returned format will be set to a single one of the requested formats.

Note that future versions of Python may introduce additional formats.

No memory is copied and no conversion is done.

Export complexity

On CPython, an export has a complexity of O(1): no memory is copied and no conversion is done.

To get the best performance on CPython and PyPy, it's recommended to support these 4 formats:

    (PyUnicode_FORMAT_UCS1 \
     | PyUnicode_FORMAT_UCS2 \
     | PyUnicode_FORMAT_UCS4 \
     | PyUnicode_FORMAT_UTF8)

PyPy uses UTF-8 natively and so the PyUnicode_FORMAT_UTF8 format is recommended. It requires a memory copy, since PyPy str objects can be moved in memory (PyPy uses a moving garbage collector).

Py_buffer format and item size

Py_buffer uses the following format and item size depending on the export format:

    Export format             Buffer format   Item size
    ----------------------    -------------   ---------
    PyUnicode_FORMAT_UCS1     "B"             1 byte
    PyUnicode_FORMAT_UCS2     "=H"            2 bytes
    PyUnicode_FORMAT_UCS4     "=I"            4 bytes
    PyUnicode_FORMAT_UTF8     "B"             1 byte
    PyUnicode_FORMAT_ASCII    "B"             1 byte
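The buffer formats in this table are standard struct module format strings, so the item sizes can be double-checked from Python (a small illustration; output shown in comments):

    import struct

    # Item sizes of the Py_buffer formats used for exports:
    for fmt in ('B', '=H', '=I'):
        print(fmt, struct.calcsize(fmt))
    # B 1
    # =H 2
    # =I 4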
PyUnicode_Import()

API:

    PyObject* PyUnicode_Import(
        const void *data,
        Py_ssize_t nbytes,
        int32_t format)

Create a Unicode string object from a buffer in a supported format.

- Return a reference to a new string object on success.
- Set an exception and return NULL on error.

data must not be NULL. nbytes must be positive or zero.

See PyUnicode_Export() for the available formats.

UTF-8 format

CPython 3.14 doesn't use the UTF-8 format internally and doesn't support exporting a string as UTF-8. The PyUnicode_AsUTF8AndSize() function can be used instead.

The PyUnicode_FORMAT_UTF8 format is provided for compatibility with alternate implementations which may use UTF-8 natively for strings.

ASCII format

When the PyUnicode_FORMAT_ASCII format is requested for export, the PyUnicode_FORMAT_UCS1 export format is used for ASCII strings.

The PyUnicode_FORMAT_ASCII format is mostly useful for PyUnicode_Import(), to validate that a string only contains ASCII characters.

Surrogate characters and embedded NUL characters

Surrogate characters are allowed: they can be imported and exported.

Embedded NUL characters are allowed: they can be imported and exported.

Implementation

https://github.com/python/cpython/pull/123738

Backwards Compatibility

There is no impact on backward compatibility; only new C API functions are added.

Usage of PEP 393 C APIs

A code search on the PyPI top 7,500 projects (in March 2024) shows that there are many projects importing and exporting UCS formats with the regular C API.

PyUnicode_FromKindAndData()

25 projects call PyUnicode_FromKindAndData():

- Cython (3.0.9)
- Levenshtein (0.25.0)
- PyICU (2.12)
- PyICU-binary (2.7.4)
- PyQt5 (5.15.10)
- PyQt6 (6.6.1)
- aiocsv (1.3.1)
- asyncpg (0.29.0)
- biopython (1.83)
- catboost (1.2.3)
- cffi (1.16.0)
- mojimoji (0.0.13)
- mwparserfromhell (0.6.6)
- numba (0.59.0)
- numpy (1.26.4)
- orjson (3.9.15)
- pemja (0.4.1)
- pyahocorasick (2.0.0)
- pyjson5 (1.6.6)
- rapidfuzz (3.6.2)
- regex (2023.12.25)
- srsly (2.4.8)
- tokenizers (0.15.2)
- ujson (5.9.0)
- unicodedata2 (15.1.0)

PyUnicode_4BYTE_DATA()

21 projects call PyUnicode_2BYTE_DATA() and/or PyUnicode_4BYTE_DATA():

- Cython (3.0.9)
- MarkupSafe (2.1.5)
- Nuitka (2.1.2)
- PyICU (2.12)
- PyICU-binary (2.7.4)
- PyQt5_sip (12.13.0)
- PyQt6_sip (13.6.0)
- biopython (1.83)
- catboost (1.2.3)
- cement (3.0.10)
- cffi (1.16.0)
- duckdb (0.10.0)
- mypy (1.9.0)
- numpy (1.26.4)
- orjson (3.9.15)
- pemja (0.4.1)
- pyahocorasick (2.0.0)
- pyjson5 (1.6.6)
- pyobjc-core (10.2)
- sip (6.8.3)
- wxPython (4.2.1)

Rejected Ideas

Reject embedded NUL characters and require trailing NUL character

In C, it's convenient to have a trailing NUL character.
For example, the for (; *str != 0; str++) loop can be used to iterate on characters, and strlen() can be used to get a string's length.

The problem is that a Python str object can embed NUL characters. Example: "ab\0c". If a string contains an embedded NUL character, code relying on the NUL character to find the string end truncates the string. It can lead to bugs, or even security vulnerabilities. See a previous discussion in the issue Change PyUnicode_AsUTF8() to return NULL on embedded null characters.

Rejecting embedded NUL characters requires scanning the string, which has O(n) complexity.

Reject surrogate characters

Surrogate characters are characters in the Unicode range [U+D800; U+DFFF]. They are disallowed by UTF codecs such as UTF-8. A Python str object can contain arbitrary lone surrogate characters. Example: "\uDC80".

Rejecting surrogate characters prevents exporting a string which contains such a character. It can be surprising and annoying, since the PyUnicode_Export() caller doesn't control the string contents.

Allowing surrogate characters makes it possible to export any string, and so avoids this issue. For example, the UTF-8 codec can be used with the surrogatepass error handler to encode and decode surrogate characters.

Conversions on demand

It would be convenient to convert formats on demand. For example, convert UCS-1 and UCS-2 to UCS-4 if an export to only UCS-4 is requested.

The problem is that most users expect an export to require no memory copy and no conversion: an O(1) complexity. It is better to have an API where all operations have an O(1) complexity.

Export to UTF-8

CPython 3.14 has a cache to encode a string to UTF-8. It is tempting to allow exporting to UTF-8.

The problem is that the UTF-8 cache doesn't support surrogate characters. An export is expected to provide the whole string content, including embedded NUL characters and surrogate characters. To export surrogate characters, a different code path using the surrogatepass error handler is needed and each export operation has to allocate a temporary buffer: O(n) complexity.

An export is expected to have an O(1) complexity, so the idea to export UTF-8 in CPython was abandoned.

Discussions

- https://discuss.python.org/t/63891
- https://github.com/capi-workgroup/decisions/issues/33
- https://github.com/python/cpython/issues/119609

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

PEP: 236
Title: Back to the __future__
Author: Tim Peters
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 26-Feb-2001
Python-Version: 2.1
Post-History: 26-Feb-2001

Motivation

From time to time, Python makes an incompatible change to the advertised semantics of core language constructs, or changes their accidental (implementation-dependent) behavior in some way.
While this is never done capriciously, and is always done with the aim of improving the language over the long term, over the short term it's contentious and disrupting.

PEP 5, Guidelines for Language Evolution, suggests ways to ease the pain, and this PEP introduces some machinery in support of that.

PEP 227, Statically Nested Scopes, is the first application, and will be used as an example here.

Intent

[Note: This is policy, and so should eventually move into PEP 5]

When an incompatible change to core language syntax or semantics is being made:

1. The release C that introduces the change does not change the syntax or semantics by default.
2. A future release R is identified in which the new syntax or semantics will be enforced.
3. The mechanisms described in PEP 230, Warning Framework, are used to generate warnings, whenever possible, about constructs or operations whose meaning may [1] change in release R.
4. The new future_statement (see below) can be explicitly included in a module M to request that the code in module M use the new syntax or semantics in the current release C.

So old code continues to work by default, for at least one release, although it may start to generate new warning messages. Migration to the new syntax or semantics can proceed during that time, using the future_statement to make modules containing it act as if the new syntax or semantics were already being enforced.

Note that there is no need to involve the future_statement machinery in new features unless they can break existing code; fully backward-compatible additions can -- and should -- be introduced without a corresponding future_statement.

Syntax

A future_statement is simply a from/import statement using the reserved module name __future__:

    future_statement: "from" "__future__" "import" feature ["as" name]
                      ("," feature ["as" name])*

    feature: identifier
    name: identifier

In addition, all future_statements must appear near the top of the module. The only lines that can appear before a future_statement are:

- The module docstring (if any).
- Comments.
- Blank lines.
- Other future_statements.

Example:

    """This is a module docstring."""

    # This is a comment, preceded by a blank line and followed by
    # a future_statement.
    from __future__ import nested_scopes

    from math import sin
    from __future__ import alabaster_weenoblobs  # compile-time error!
    # That was an error because preceded by a non-future_statement.

Semantics

A future_statement is recognized and treated specially at compile time: changes to the semantics of core constructs are often implemented by generating different code. It may even be the case that a new feature introduces new incompatible syntax (such as a new reserved word), in which case the compiler may need to parse the module differently.
Such decisions cannot be pushed off until runtime.

For any given release, the compiler knows which feature names have been defined, and raises a compile-time error if a future_statement contains a feature not known to it [2].

The direct runtime semantics are the same as for any import statement: there is a standard module __future__.py, described later, and it will be imported in the usual way at the time the future_statement is executed.

The interesting runtime semantics depend on the specific feature(s) "imported" by the future_statement(s) appearing in the module.

Note that there is nothing special about the statement:

    import __future__ [as name]

That is not a future_statement; it's an ordinary import statement, with no special semantics or syntax restrictions.

Example

Consider this code, in file scope.py:

    x = 42
    def f():
        x = 666
        def g():
            print "x is", x
        g()
    f()

Under 2.0, it prints:

    x is 42

Nested scopes (PEP 227) are being introduced in 2.1. But under 2.1, it still prints:

    x is 42

and also generates a warning.

In 2.2, and also in 2.1 if from __future__ import nested_scopes is included at the top of scope.py, it prints:

    x is 666

Standard Module __future__.py

Lib/__future__.py is a real module, and serves three purposes:

1. To avoid confusing existing tools that analyze import statements and expect to find the modules they're importing.
2. To ensure that future_statements run under releases prior to 2.1 at least yield runtime exceptions (the import of __future__ will fail, because there was no module of that name prior to 2.1).
3. To document when incompatible changes were introduced, and when they will be -- or were -- made mandatory. This is a form of executable documentation, and can be inspected programmatically via importing __future__ and examining its contents.

Each statement in __future__.py is of the form:

    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ")"

where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples of the same form as sys.version_info:

    (PY_MAJOR_VERSION,  # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION,  # the 1; an int
     PY_MICRO_VERSION,  # the 0; an int
     PY_RELEASE_LEVEL,  # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL  # the 3; an int
    )

OptionalRelease records the first release in which:

    from __future__ import FeatureName

was accepted.

In the case of MandatoryReleases that have not yet occurred, MandatoryRelease predicts the release in which the feature will become part of the language.

Else MandatoryRelease records when the feature became part of the language; in releases at or after that, modules no longer need:

    from __future__ import FeatureName

to use the feature in question, but may continue to use such imports.

MandatoryRelease may also be None, meaning that a planned feature got dropped.

Instances of class _Feature have two corresponding methods, .getOptionalRelease() and .getMandatoryRelease().

No feature line will ever be deleted from __future__.py.

Example line:

    nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "final", 0))

This means that:

    from __future__ import nested_scopes

will work in all releases at or after 2.1b1, and that nested_scopes are intended to be enforced starting in release 2.2.
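For example, this executable documentation can be queried directly (results for the nested_scopes feature shown as comments):

    import __future__

    feature = __future__.nested_scopes
    print feature.getOptionalRelease()   # (2, 1, 0, 'beta', 1)
    print feature.getMandatoryRelease()  # (2, 2, 0, 'final', 0)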
Resolved Problem: Runtime Compilation

Several Python features can compile code during a module's runtime:

1. The exec statement.
2. The execfile() function.
3. The compile() function.
4. The eval() function.
5. The input() function.

Since a module M containing a future_statement naming feature F explicitly requests that the current release act like a future release with respect to F, any code compiled dynamically from text passed to one of these from within M should probably also use the new syntax or semantics associated with F. The 2.1 release does behave this way.

This isn't always desired, though. For example, doctest.testmod(M) compiles examples taken from strings in M, and those examples should use M's choices, not necessarily the doctest module's choices. In the 2.1 release, this isn't possible, and no scheme has yet been suggested for working around this. NOTE: PEP 264 later addressed this in a flexible way, by adding optional arguments to compile().

In any case, a future_statement appearing "near the top" (see Syntax above) of text compiled dynamically by an exec, execfile() or compile() applies to the code block generated, but has no further effect on the module that executes such an exec, execfile() or compile(). This can't be used to affect eval() or input(), however, because they only allow expression input, and a future_statement is not an expression.

Resolved Problem: Native Interactive Shells

There are two ways to get an interactive shell:

1. By invoking Python from a command line without a script argument.
2. By invoking Python from a command line with the -i switch and with a script argument.

An interactive shell can be seen as an extreme case of runtime compilation (see above): in effect, each statement typed at an interactive shell prompt runs a new instance of exec, compile() or execfile(). A future_statement typed at an interactive shell applies to the rest of the shell session's life, as if the future_statement had appeared at the top of a module.

Resolved Problem: Simulated Interactive Shells

Interactive shells "built by hand" (by tools such as IDLE and the Emacs Python-mode) should behave like native interactive shells (see above). However, the machinery used internally by native interactive shells has not been exposed, and there isn't a clear way for tools building their own interactive shells to achieve the desired behavior.

NOTE: PEP 264 later addressed this, by adding intelligence to the standard codeop.py. Simulated shells that don't use the standard library shell helpers can get a similar effect by exploiting the new optional arguments to compile() added by PEP 264.
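As a sketch, using the optional flags argument to compile() and the _Feature.compiler_flag attribute introduced by that later work (neither is part of this PEP), a simulated shell can compile text under a requested feature:

    import __future__

    source = open('scope.py').read()
    # Compile scope.py as if it began with
    # "from __future__ import nested_scopes":
    code = compile(source, 'scope.py', 'exec',
                   __future__.nested_scopes.compiler_flag)
    exec code   # prints "x is 666"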
Questions and Answers

What about a "from __past__" version, to get back old behavior?

Outside the scope of this PEP. Seems unlikely to the author, though. Write a PEP if you want to pursue it.

What about incompatibilities due to changes in the Python virtual machine?

Outside the scope of this PEP, although PEP 5 suggests a grace period there too, and the future_statement may also have a role to play there.

What about incompatibilities due to changes in Python's C API?

Outside the scope of this PEP.

I want to wrap future_statements in try/except blocks, so I can use different code depending on which version of Python I'm running. Why can't I?

Sorry! try/except is a runtime feature; future_statements are primarily compile-time gimmicks, and your try/except happens long after the compiler is done. That is, by the time you do try/except, the semantics in effect for the module are already a done deal. Since the try/except wouldn't accomplish what it looks like it should accomplish, it's simply not allowed. We also want to keep these special statements very easy to find and to recognize.

Note that you can import __future__ directly, and use the information in it, along with sys.version_info, to figure out where the release you're running under stands in relation to a given feature's status.

Going back to the nested_scopes example, what if release 2.2 comes along and I still haven't changed my code? How can I keep the 2.1 behavior then?

By continuing to use 2.1, and not moving to 2.2 until you do change your code. The purpose of future_statement is to make life easier for people who keep current with the latest release in a timely fashion. We don't hate you if you don't, but your problems are much harder to solve, and somebody with those problems will need to write a PEP addressing them. future_statement is aimed at a different audience.

Overloading import sucks. Why not introduce a new statement for this?

Like maybe lambda lambda nested_scopes? That is, unless we introduce a new keyword, we can't introduce an entirely new statement. But if we introduce a new keyword, that in itself would break old code. That would be too ironic to bear. Yes, overloading import does suck, but not as energetically as the alternatives -- as is, future_statements are 100% backward compatible.

Copyright

This document has been placed in the public domain.

References and Footnotes

[1] Note that this is may and not will: better safe than sorry. Of course spurious warnings won't be generated when avoidable with reasonable cost.

[2] This ensures that a future_statement run under a release prior to the first one in which a given feature is known (but >= 2.1) will raise a compile-time error rather than silently do a wrong thing. If transported to a release prior to 2.1, a runtime error will be raised because of the failure to import __future__ (no such module existed in the standard distribution before the 2.1 release, and the double underscores make it a reserved name).

PEP: 540
Title: Add a new UTF-8 Mode
Author: Victor Stinner
BDFL-Delegate: INADA Naoki
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Jan-2016
Python-Version: 3.7
Resolution: https://mail.python.org/pipermail/python-dev/2017-December/151173.html

Abstract

Add a new "UTF-8 Mode" to enhance Python's use of UTF-8.
When UTF-8 Mode is active, Python will:

- use the utf-8 encoding, regardless of the locale currently set by the current platform, and
- change the stdin and stdout error handlers to surrogateescape.

This mode is off by default, but is automatically activated when using the "POSIX" locale.

Add the -X utf8 command line option and PYTHONUTF8 environment variable to control UTF-8 Mode.

Rationale

Locale encoding and UTF-8

Python 3.6 uses the locale encoding for filenames, environment variables, standard streams, etc. The locale encoding is inherited from the locale; the encoding and the locale are tightly coupled.

Many users inherit the ASCII encoding from the POSIX locale, aka the "C" locale, but are unable to change the locale for various reasons. This encoding is very limited in terms of Unicode support: any non-ASCII character is likely to cause trouble.

It isn't always easy to get an accurate locale. Locales don't get the exact same name on different Linux distributions, FreeBSD, macOS, etc. And some locales, like the recent C.UTF-8 locale, are only supported by a few platforms. The current locale can even vary on the same platform depending on context; for example, a SSH connection can use a different encoding than the filesystem or local terminal encoding on the same machine.

On the flip side, Python 3.6 is already using UTF-8 by default on macOS, Android and Windows (PEP 529) for most functions -- although open() is a notable exception here. UTF-8 is also the default encoding of Python scripts, XML and JSON file formats. The Go programming language uses UTF-8 for all strings.

UTF-8 support is nearly ubiquitous for data read and written by modern platforms. It also has excellent support in Python. The problem is simply that the locale is frequently misconfigured. An obvious solution suggests itself: ignore the locale encoding and use UTF-8.

Passthrough for undecodable bytes: surrogateescape

When decoding bytes from UTF-8 using the default strict error handler, Python 3 raises a UnicodeDecodeError on the first undecodable byte.

Unix command line tools like cat or grep and most Python 2 applications simply do not have this class of bugs: they don't decode data, but process data as a raw bytes sequence.

Python 3 already has a solution to behave like Unix tools and Python 2: the surrogateescape error handler (PEP 383). It allows processing data as if it were bytes, but uses Unicode in practice; undecodable bytes are stored as surrogate characters.

UTF-8 Mode sets the surrogateescape error handler for stdin and stdout, since these streams are commonly associated with Unix command line tools.

However, users have a different expectation on files. Files are expected to be properly encoded, and Python is expected to fail early when open() is called with the wrong options, like opening a JPEG picture in text mode. The open() default error handler remains strict for these reasons.
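A short illustration of the error handler's pass-through behaviour (the byte 0xE9 is not valid UTF-8 on its own):

    data = b'caf\xe9'                               # not valid UTF-8
    text = data.decode('utf-8', 'surrogateescape')  # undecodable byte kept...
    assert text == 'caf\udce9'                      # ...as a surrogate character
    assert text.encode('utf-8', 'surrogateescape') == data  # lossless round-trip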
UTF-8 Mode sets the surrogateescape error handler for stdin and stdout,
since these streams are commonly associated with Unix command line
tools.

However, users have a different expectation on files. Files are expected
to be properly encoded, and Python is expected to fail early when open()
is called with the wrong options, like opening a JPEG picture in text
mode. The open() default error handler remains strict for these reasons.

No change by default for best backward compatibility

While UTF-8 is perfect in most cases, sometimes the locale encoding is
actually the best encoding.

This PEP changes the behaviour for the POSIX locale since this locale is
usually equivalent to the ASCII encoding, whereas UTF-8 is a much better
choice. It does not change the behaviour for other locales, to prevent
any risk of regression.

Since users must explicitly enable the new UTF-8 Mode for these other
locales, they are responsible for any potential mojibake issues caused
by UTF-8 Mode.

Proposal

Add a new UTF-8 Mode to use the UTF-8 encoding, ignore the locale
encoding, and change stdin and stdout error handlers to surrogateescape.

Add the new -X utf8 command line option and PYTHONUTF8 environment
variable. Users can explicitly activate UTF-8 Mode with the command-line
option -X utf8 or by setting the environment variable PYTHONUTF8=1.

This mode is disabled by default and enabled by the POSIX locale. Users
can explicitly disable UTF-8 Mode with the command-line option -X utf8=0
or by setting the environment variable PYTHONUTF8=0.

For standard streams, the PYTHONIOENCODING environment variable has
priority over UTF-8 Mode.

On Windows, the PYTHONLEGACYWINDOWSFSENCODING environment variable (PEP
529) has priority over UTF-8 Mode.

Effects of UTF-8 Mode:

- sys.getfilesystemencoding() returns 'UTF-8'.
- locale.getpreferredencoding() returns UTF-8; its do_setlocale
  argument, and the locale encoding, are ignored.
- sys.stdin and sys.stdout error handler is set to surrogateescape.

Side effects:

- open() uses the UTF-8 encoding by default. However, it still uses
  the strict error handler by default.
- os.fsdecode() and os.fsencode() use the UTF-8 encoding.
- Command line arguments, environment variables and filenames use the
  UTF-8 encoding.
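These effects can be observed directly (a sketch; run it under Python
3.7+ with the mode enabled, e.g. "python3 -X utf8 demo.py" or
"PYTHONUTF8=1 python3 demo.py"):

    import locale
    import sys

    print(sys.flags.utf8_mode)            # 1 when UTF-8 Mode is enabled
    print(sys.getfilesystemencoding())    # 'utf-8'
    print(locale.getpreferredencoding())  # 'UTF-8'
    print(sys.stdin.errors)               # 'surrogateescape'
    print(sys.stdout.errors)              # 'surrogateescape'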
Relationship with the locale coercion (PEP 538)

The POSIX locale enables the locale coercion (PEP 538) and the UTF-8
mode (PEP 540). When the locale coercion is enabled, enabling the UTF-8
mode has no additional effect.

The UTF-8 Mode has the same effect as locale coercion:

- sys.getfilesystemencoding() returns 'UTF-8',
- locale.getpreferredencoding() returns UTF-8, and
- the sys.stdin and sys.stdout error handlers are set to
  surrogateescape.

These changes only affect Python code. But the locale coercion has
additional effects: the LC_CTYPE environment variable and the LC_CTYPE
locale are set to a UTF-8 locale like C.UTF-8. One side effect is that
non-Python code is also impacted by the locale coercion. The two PEPs
are complementary.

On platforms like CentOS 7 where locale coercion is not supported, the
POSIX locale only enables UTF-8 Mode. In this case, Python code uses the
UTF-8 encoding and ignores the locale encoding, whereas non-Python code
uses the locale encoding, which is usually ASCII for the POSIX locale.

While the UTF-8 Mode is supported on all platforms and can be enabled
with any locale, the locale coercion is not supported by all platforms
and is restricted to the POSIX locale.

The UTF-8 Mode only has an impact on Python child processes when the
PYTHONUTF8 environment variable is set to 1, whereas the locale coercion
sets the LC_CTYPE environment variable, which impacts all child
processes.

The benefit of the locale coercion approach is that it helps ensure that
encoding handling in binary extension modules and child processes is
consistent with Python's encoding handling. The upside of the UTF-8 Mode
approach is that it allows an embedding application to change the
interpreter's behaviour without having to change the process global
locale settings.

Backward Compatibility

The only backward incompatible change is that the POSIX locale now
enables the UTF-8 Mode by default: it will now use the UTF-8 encoding,
ignore the locale encoding, and change stdin and stdout error handlers
to surrogateescape.

Annex: Encodings And Error Handlers

UTF-8 Mode changes the default encoding and error handler used by
open(), os.fsdecode(), os.fsencode(), sys.stdin, sys.stdout and
sys.stderr.

Encoding and error handler

    Function                      Default                  UTF-8 Mode or POSIX locale
    ----------------------------  -----------------------  --------------------------
    open()                        locale/strict            UTF-8/strict
    os.fsdecode(), os.fsencode()  locale/surrogateescape   UTF-8/surrogateescape
    sys.stdin, sys.stdout         locale/strict            UTF-8/surrogateescape
    sys.stderr                    locale/backslashreplace  UTF-8/backslashreplace

By comparison, Python 3.6 uses:

    Function                      Default                  POSIX locale
    ----------------------------  -----------------------  --------------------------
    open()                        locale/strict            locale/strict
    os.fsdecode(), os.fsencode()  locale/surrogateescape   locale/surrogateescape
    sys.stdin, sys.stdout         locale/strict            locale/**surrogateescape**
    sys.stderr                    locale/backslashreplace  locale/backslashreplace

Encoding and error handler on Windows

On Windows, the encodings and error handlers are different:

    Function                      Default                 Legacy Windows FS encoding  UTF-8 Mode
    ----------------------------  ----------------------  --------------------------  ----------------------
    open()                        mbcs/strict             mbcs/strict                 UTF-8/strict
    os.fsdecode(), os.fsencode()  UTF-8/surrogatepass     mbcs/replace                UTF-8/surrogatepass
    sys.stdin, sys.stdout         UTF-8/surrogateescape   UTF-8/surrogateescape       UTF-8/surrogateescape
    sys.stderr                    UTF-8/backslashreplace  UTF-8/backslashreplace      UTF-8/backslashreplace

By comparison, Python 3.6 uses:

    Function                      Default                 Legacy Windows FS encoding
    ----------------------------  ----------------------  --------------------------
    open()                        mbcs/strict             mbcs/strict
    os.fsdecode(), os.fsencode()  UTF-8/surrogatepass     mbcs/replace
    sys.stdin, sys.stdout         UTF-8/surrogateescape   UTF-8/surrogateescape
    sys.stderr                    UTF-8/backslashreplace  UTF-8/backslashreplace

The "Legacy Windows FS encoding" is enabled by the
PYTHONLEGACYWINDOWSFSENCODING environment variable.

If stdin and/or stdout is redirected to a pipe, sys.stdin and/or
sys.stdout use the mbcs encoding by default rather than UTF-8. But in
UTF-8 Mode, sys.stdin and sys.stdout always use the UTF-8 encoding.

Note

There is no POSIX locale on Windows.
The ANSI code page is used as the locale encoding, and this code page
never uses the ASCII encoding.

Links

- bpo-29240: Implementation of the PEP 540: Add a new UTF-8 Mode
- PEP 538: "Coercing the legacy C locale to C.UTF-8"
- PEP 529: "Change Windows filesystem encoding to UTF-8"
- PEP 528: "Change Windows console encoding to UTF-8"
- PEP 383: "Non-decodable Bytes in System Character Interfaces"

Post History

- 2017-12: [Python-Dev] PEP 540: Add a new UTF-8 Mode
- 2017-04: [Python-Dev] Proposed BDFL Delegate update for PEPs 538 &
  540 (assuming UTF-8 for *nix system boundaries)
- 2017-01: [Python-ideas] PEP 540: Add a new UTF-8 Mode
- 2017-01: bpo-28180: Implementation of the PEP 538: coerce C locale
  to C.utf-8 (msg284764)
- 2016-08-17: bpo-27781: Change sys.getfilesystemencoding() on Windows
  to UTF-8 (msg272916) -- Victor proposed -X utf8 for the PEP 529
  (Change Windows filesystem encoding to UTF-8)

Version History

- Version 4: locale.getpreferredencoding() now returns 'UTF-8' in the
  UTF-8 Mode.
- Version 3: The UTF-8 Mode does not change the open() default error
  handler (strict) anymore, and the Strict UTF-8 Mode has been
  removed.
- Version 2: Rewrite the PEP from scratch to make it much shorter and
  easier to understand.
- Version 1: First version posted to python-dev.

Copyright

This document has been placed in the public domain.

PEP: 595
Title: Improving bugs.python.org
Author: Ezio Melotti, Berker Peksag
BDFL-Delegate: Barry Warsaw
Status: Withdrawn
Type: Informational
Content-Type: text/x-rst
Created: 12-May-2019

Abstract

This PEP proposes a list of improvements to make bugs.python.org more
usable for contributors and core developers. This PEP also discusses why
remaining on Roundup should be preferred over switching to GitHub
Issues, as proposed by PEP 581.

Resolution

2020-06-25: With the acceptance of PEP 581, the move to GitHub for
issues is proceeding; this PEP is being marked as a withdrawn
informational PEP.

Motivation

On May 14th, 2019 PEP 581 was accepted without much public discussion
and without a clear consensus. The PEP contains factual errors and
doesn't address some of the issues that the migration to GitHub Issues
might present.

Given the scope of the migration, the amount of work required, and how
it will negatively affect the workflow during the transition phase, this
decision should be re-evaluated.

Roundup advantages over GitHub Issues

This section discusses reasons why Roundup should be preferred over
GitHub Issues and Roundup features that are not available on GitHub
Issues.

- Roundup is the status quo. Roundup has been an integral part of the
  CPython workflow for years.
  It is a stable product that has been tested and customized to adapt
  to our needs as the workflow evolved.

  It is possible to gradually improve it and avoid the disruption that
  a switch to a different system would inevitably bring to the
  workflow.

- Open-source and Python powered. Roundup is an open-source project
  and is written in Python. By using it and supporting it, we also
  support the Python ecosystem. Several features developed for bpo
  have also been ported to upstream Roundup over the years.

- Fully customizable. Roundup can be (and has been) fully customized
  to fit our needs.

- Finer-grained access control. Roundup allows the creation of
  different roles with different permissions (e.g. create, view, edit,
  etc.) for each individual property, and users can have multiple
  roles.

- Flexible UI. While the Roundup UI might look dated, it is convenient
  and flexible.

  For example, on the issue page, each field (e.g. title, type,
  versions, status, linked files and PRs, etc.) has appropriate UI
  elements (input boxes, dropdowns, tables, etc.) that are easy to set
  and also provide a convenient way to get info about the issue at a
  glance. The number of fields, their values, and the UI element they
  use is also fully customizable. GitHub only provides labels.

  The issue list page presents the issues in a compact and easy to
  read table with separate columns for different fields. For
  comparison, Roundup lists 50 issues in a screen, whereas GitHub
  takes two screens to show 25 issues.

- Advanced search. Roundup provides an accurate way to search and
  filter by using any combination of issue fields. It is also possible
  to customize the number of results and the fields displayed in the
  table, and the sorting and grouping (up to two levels).

  bpo also provides predefined summaries (e.g. "Created by you",
  "Assigned to you", etc.) and allows the creation of custom search
  queries that can be conveniently accessed from the sidebar.

- Nosy list autocomplete. The nosy list has an autocomplete feature
  that suggests maintainers and experts. The suggestions are
  automatically updated when the experts index changes.

- Dependencies and Superseders. Roundup allows specifying dependencies
  that must be addressed before the current issue can be closed and a
  superseder issue to easily mark duplicates (for example, bpo-12078).
  The list of dependencies can also be used to create meta-issues that
  reference several other sub-issues (for example, bpo-26865).

Improving Roundup

This section lists some of the issues mentioned by PEP 581 and other
desired features and discusses how they can be implemented by improving
Roundup and/or our instance.

- REST API support. A REST API will make integration with other
  services and the development of new tools and applications easier
  (see the sketch after this list).

  Upstream Roundup now supports a REST API. Updating the tracker will
  make the REST API available.

- GitHub login support. This will allow users to login to
  bugs.python.org (bpo) without having to create a new account. It
  will also solve issues with confirmation emails being marked as
  spam, and provide two-factor authentication.

  A patch to add this functionality is already available and is being
  integrated at the time of writing.

- Markdown support and message preview and editing.
  This feature will allow the use of Markdown in messages and the
  ability to preview the message before the submission and edit it
  afterward.

  This can be done, but it will take some work. Possible solutions
  have been proposed on the roundup-devel mailing list.

- "Remove me from nosy list" button. Add a button on issue pages to
  remove self from the nosy list.

  This feature will be added during GSoC 2019.

- Mobile friendly theme. The current theme of bugs.python.org looks
  dated, and it doesn't work well with mobile browsers.

  A mobile-friendly theme that is more modern but still familiar will
  be added.

- Move reply box close to the last message. The reply box is located
  at the top of the page, whereas the last message is at the bottom.

  The reply box can be moved or duplicated after the last message.

- Real-time updates. When another user submits changes to an issue,
  they should show up in real time.

  This can be accomplished by using the REST API.

- Add PR link to BPO emails. Currently bpo emails don't include links
  to the corresponding PRs.

  A patch is available to change the content of the bpo emails from:

      components: +Tkinter
      versions: +Python 3.4
      pull_requests: +42

  to:

      components: +Tkinter
      versions: +Python 3.4
      pull_request: https://github.com/python/cpython/pull/341

- Python 3 support. Using Python 3 will make maintenance easier.

  Upstream Roundup now supports Python 3. Updating the tracker will
  allow us to switch to Python 3. The instances will need to be
  updated as well.

- Use upstream Roundup. We currently use a fork of Roundup with a few
  modifications, most notably the GitHub integration. If this is
  ported upstream, we can start using upstream Roundup without having
  to maintain our fork.
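As a sketch of what a REST API enables, a tool could fetch an issue as
JSON with a few lines of Python (the endpoint path and response layout
here are hypothetical; consult the Roundup documentation for the actual
schema):

    import requests

    # Hypothetical: read one issue from a Roundup REST endpoint.
    resp = requests.get(
        "https://bugs.python.org/rest/data/issue/12078",
        headers={"Accept": "application/json"},
    )
    resp.raise_for_status()
    issue = resp.json()["data"]
    print(issue["attributes"]["title"])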
PEP 581 issues

This section addresses some errors and inaccuracies found in PEP 581.

The "Why GitHub?" section of PEP 581 lists features currently available
on GitHub Issues but not on Roundup. Some of these features are
currently supported:

- "Ability to reply to issue and pull request conversations via
  email."
  - Being able to reply by email has been one of the core features
    of Roundup since the beginning. It is also possible to create
    new issues or close existing ones, set or modify fields, and add
    attachments.
- "Email notifications containing metadata, integrated with Gmail,
  allowing systematic filtering of emails."
  - Emails sent by Roundup contain metadata that can be used for
    filtering.
- "Additional privacy, such as offering the user a choice to hide an
  email address, while still allowing communication with the user
  through @-mentions."
  - Email addresses are hidden by default to users that are not
    registered. Registered users can see other users' addresses
    because we configured the tracker to show them. It can easily be
    changed if desired. Users can still be added to the nosy list by
    using their username even if their address is hidden.
- "Ability to automatically close issues when a PR has been merged."
  - The GitHub integration of Roundup automatically closes issues
    when a commit that contains "fixes issue <id>" is merged.
    (Alternative spellings such as "closes" or "bug" are also
    supported.) See this message for a recent example of this
    feature.
- "Support for permalinks, allowing easy quoting and copying & pasting
  of source code."
  - Roundup has permalinks for issues, messages, attachments, etc.
    In addition, Roundup makes it easy to rewrite broken URLs in
    messages (e.g. if the code hosting changes).
- "Core developers, volunteers, and the PSF don't have to maintain the
  issue infrastructure/site, giving us more time and resources to
  focus on the development of Python."
  - While this is partially true, additional resources are required
    to write and maintain bots.

    In some cases, bots are required to work around GitHub's missing
    features rather than to extend it. This webhook was written
    specifically to work around GitHub's email integration.

    Updating our bots to stay up-to-date with changes in the GitHub
    API also has a maintenance cost. This recent incident caused by
    GitHub took two days to be fixed.

    In addition, we will still need to maintain Roundup for bpo
    (even if it becomes read-only) and for the other trackers we
    currently host/maintain (Jython and Roundup).

The "Issues with Roundup / bpo" section of PEP 581 lists some issues
that have already been fixed:

- "The upstream Roundup code is in Mercurial. Without any CI
  available, it puts heavy burden on the few existing maintainers in
  terms of reviewing, testing, and applying patches."
  - While Roundup uses Mercurial by default, there is a git clone
    available on GitHub. Roundup also has CI available on Travis CI
    and Codecov.
- "There is no REST API available. There is an open issue in Roundup
  for adding REST API. Last activity was in 2016."
  - The REST API has been integrated and it's now available in
    Roundup.
- "Users email addresses are exposed. There is no option to mask it."
  - Exposing addresses to registered and logged in users was a
    decision taken when our instance was set up.

    This has now been changed to make the email addresses hidden for
    regular users too (Developers and Coordinators can still see
    them). The "Email address" column from the user listing page has
    been removed too.
- "It sends a number of unnecessary emails and notifications, and it
  is difficult, if not impossible, to configure."
  - This can be configured.
- "Creating an account has been a hassle. There have been reports of
  people having trouble creating accounts or logging in."
  - The main issue is confirmation emails being marked as spam. Work
    has been done to resolve the issue.

Migration considerations

This section describes issues with the migration that might not have
been addressed by PEP 581 and PEP 588.

PEP 588 suggests adding a button to migrate issues to GitHub only when
someone wants to keep working on them. This approach has several issues,
but there are also other issues that will need to be addressed
regardless of the approach used:

- Vendor lock-in. GitHub is proprietary and there is risk of vendor
  lock-in. Their business model might change and they could shut down
  altogether. For example, several projects decided to move away from
  GitHub after the Microsoft acquisition.

  If/when the repository is no longer available on GitHub, we will be
  forced to migrate again and all the links to the issues won't work
  anymore.

- Required bpo updates.
  bpo will need to be updated in order to add a button that, once
  pressed, creates a new issue on GitHub, copies over all the
  messages, attachments, and creates/adds labels for the existing
  fields. Permissions will also need to be tweaked to make individual
  issues read-only once they are migrated, and to prevent users from
  creating new accounts. It might be necessary to set up redirects
  (see below).

- Two trackers. If issues are migrated on demand, the issues will be
  split between two trackers. Referencing and searching issues will
  take significantly more effort.

- Lossy conversion. GitHub's only mechanism for adding custom metadata
  is labels. bpo uses a number of fields to store several different
  kinds of metadata. Preserving all fields and values will result in
  too many labels. If only some fields and values are preserved, the
  others will be lost (unless there is a way to preserve them
  elsewhere).

- Issue IDs preservation. GitHub doesn't provide a way to set and
  preserve the ID of migrated issues. Some projects managed to
  preserve the IDs by contacting the GitHub staff and migrating the
  issues en masse. However, this is no longer possible, since PRs and
  issues share the same namespace and PRs already use existing bpo
  issue IDs.

- Internal issue links preservation. Existing issues might contain
  references to other issues in messages and fields (e.g. dependencies
  or superseder). Since the issue ID will change during the migration,
  these will need to be updated. If the issues are migrated on demand,
  all the existing internal references to the migrated issues (on both
  bpo and GitHub issues) will have to be updated.

  Setting up a redirect for each migrated issue on bpo might mitigate
  the issue, however -- if references in migrated messages are not
  updated -- it will cause confusion (e.g. if bpo issue #1234 becomes
  GitHub issue #4321, a reference to #1234 in a migrated message could
  link to bpo #1234 and bpo can redirect to GitHub issue #4321, but
  new references to #1234 will link to GitHub PR #1234 rather than
  GitHub issue #4321). Manually having to specify a bpo- or gh- prefix
  is error prone.

- External issue links preservation. A number of websites, emails,
  etc. link to bpo issues. If bpo is shut down, these links will
  break. If we don't want to break the links, we will have to keep bpo
  alive and set up a redirect system that links to the corresponding
  GitHub issue.

  In addition, if GitHub shuts down, we won't have any way to set up
  redirects and preserve external links to GitHub issues.

- References preservation and updating. In addition to issue
  references, bpo converts a number of other references into links,
  including message and PR IDs, changeset numbers, legacy SVN revision
  numbers, paths to files in the repo, files in tracebacks (detecting
  the correct branch), and links to devguide pages and sections.

  Since Roundup converts references to links when messages are
  requested, it is possible to update the target and generate the
  correct link.
  This need already arose several times, for example:
  files and HG changesets moved from hg.python.org to GitHub and the
  devguide moved from docs.python.org/devguide to devguide.python.org.

  Since messages on GitHub are static, the links will need to be
  generated and hardcoded during the migration or they will be lost.
  In order to update them, a tool to find all references and
  regenerate the links will need to be written.

- Roundup and bpo maintenance. On top of the aforementioned changes to
  bpo and development of tools required to migrate to GitHub issues,
  we will still need to keep running and maintaining Roundup, both for
  our bpo instance (read-only) and for the Jython and Roundup trackers
  (read-write).

  Even if eventually we migrate all bpo issues to GitHub and we stop
  maintaining Jython and Roundup, bpo will need to be maintained and
  redirect to the corresponding GitHub issues.

- Bots maintenance. Since it's not possible to customize GitHub
  directly, it's also necessary to write, maintain, and host bots.
  Even if eventually we stop maintaining Roundup, the maintenance
  burden simply shifts from Roundup to the bots. Hosting each
  different bot also has a monetary cost.

- Using issue templates. Manually editing issue templates to "remove
  texts that don't apply to [the] issue" is cumbersome and
  error-prone.

- Signal to noise ratio. Switching to GitHub Issues will likely
  increase the number of invalid reports and increase the triaging
  effort. This concern has been raised in the past in a Zulip topic.

  There have already been cases where people posted comments on PRs
  that required moderators to mark them as off-topic or disruptive,
  delete them altogether, and even lock the conversation (for example,
  this PR).

- Weekly tracker reports and stats. Roundup sends weekly reports to
  python-dev with a summary that includes new issues, recent issues
  with no replies, recent issues waiting for review, most discussed
  issues, closed issues, and deltas for open/closed/total issue counts
  (for example, see this summary). The report provides an easy way to
  keep track of the tracker activity and to make sure that issues that
  require attention are noticed.

  The data collected by the weekly report is also used to generate
  statistics and graphs that can be used to gain new insights.

- bpo-related MLs. There are currently two mailing lists where bpo
  posts new tracker issues and all messages respectively:
  new-bugs-announce and python-bugs-list. A new system will need to be
  developed to preserve this functionality.
  These MLs offer additional ways to keep track of the tracker
  activity.

Copyright

This document has been placed in the public domain.

PEP: 576
Title: Rationalize Built-in function classes
Author: Mark Shannon
BDFL-Delegate: Petr Viktorin
Status: Withdrawn
Type: Standards Track
Content-Type: text/x-rst
Created: 10-May-2018
Python-Version: 3.8
Post-History: 17-May-2018, 23-Jun-2018, 08-Jul-2018, 29-Mar-2019

Abstract

Expose the "FastcallKeywords" convention used internally by CPython to
third-party code, and make the inspect module use duck-typing. In
combination this will allow third-party C extensions and tools like
Cython to create objects that use the same calling conventions as
built-in and Python functions, thus gaining performance parity with
built-in functions like len or print.

A small improvement in the performance of existing code is expected.

Motivation

Currently third-party module authors face a dilemma when implementing
functions in C. Either they can use one of the pre-existing built-in
function or method classes or implement their own custom class in C. The
first choice causes them to lose the ability to access the internals of
the callable object. The second choice is an additional maintenance
burden and, more importantly, has a significant negative impact on
performance.

This PEP aims to allow authors of third-party C modules, and tools like
Cython, to utilize the faster calling convention used internally by
CPython for built-in functions and methods, and to do so without a loss
of capabilities relative to a function implemented in Python.

Introspection

The inspect module will fully support duck-typing when introspecting
callables.

The inspect.Signature.from_callable() function computes the signature of
a callable. If an object has a __signature__ property, then
inspect.Signature.from_callable() simply returns that. To further
support duck-typing, if a callable has a __text_signature__ then the
__signature__ will be created from that.

This means that third-party built-in functions can implement
__text_signature__ if sufficient, and the more expensive __signature__
if necessary.
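For instance (a sketch of the duck-typing described above; inspect
already honors an explicit __signature__, while the __text_signature__
fallback for arbitrary callables is what this PEP proposes; the Adder
class is a made-up stand-in for a C-implemented callable):

    import inspect

    class Adder:
        # The more expensive option: a full __signature__.
        __signature__ = inspect.Signature([
            inspect.Parameter("a", inspect.Parameter.POSITIONAL_OR_KEYWORD),
            inspect.Parameter("b", inspect.Parameter.POSITIONAL_OR_KEYWORD),
        ])

        def __call__(self, a, b):
            return a + b

    print(inspect.signature(Adder()))  # prints: (a, b)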
Efficient calls to third-party callables

Currently the majority of calls are dispatched to functions and
method_descriptors in custom code, using the "FastcallKeywords" internal
calling convention. This PEP proposes that this calling convention be
implemented via a C function pointer. Third-party callables which
implement this binary interface will have the potential to be called as
fast as a built-in function.

Continued prohibition of callable classes as base classes

Currently any attempt to use function, method or method_descriptor as a
base class for a new class will fail with a TypeError. This behaviour is
desirable as it prevents errors when a subclass overrides the __call__
method. If callables could be sub-classed then any call to a function or
a method_descriptor would need an additional check that the __call__
method had not been overridden. By exposing an additional call
mechanism, the potential for errors becomes greater. As a consequence,
any third-party class implementing the additional call interface will
not be usable as a base class.

New classes and changes to existing classes

Python visible changes

1. A new built-in class, builtin_function, will be added.
2. types.BuiltinFunctionType will refer to builtin_function, not
   builtin_function_or_method.
3. Instances of the builtin_function class will retain the __module__
   property of builtin_function_or_method and gain the func_module and
   func_globals properties. The func_module allows access to the module
   to which the function belongs. Note that this is different from the
   __module__ property which merely returns the name of the module. The
   func_globals property is equivalent to func_module.__dict__ and is
   provided to mimic the Python function property of the same name.
4. When binding a method_descriptor instance to an instance of its
   owning class, a bound_method will be created instead of a
   builtin_function_or_method. This means that the method_descriptors
   now mimic the behaviour of Python functions more closely. In other
   words, [].append becomes a bound_method instead of a
   builtin_function_or_method.

C API changes

1. A new function
   PyBuiltinFunction_New(PyMethodDef *ml, PyObject *module) is added to
   create built-in functions.
2. PyCFunction_NewEx() and PyCFunction_New() are deprecated and will
   return a PyBuiltinFunction if able, otherwise a
   builtin_function_or_method.

Retaining backwards compatibility in the C API and ABI

The proposed changes are fully backwards and forwards compatible at both
the API and ABI level.

Internal C changes

Two new flags will be allowed for the typeobject.tp_flags field. These
are Py_TPFLAGS_EXTENDED_CALL and Py_TPFLAGS_FUNCTION_DESCRIPTOR.

Py_TPFLAGS_EXTENDED_CALL

For any built-in class that sets Py_TPFLAGS_EXTENDED_CALL, the C struct
corresponding to this built-in class must begin with the struct
PyExtendedCallable which is defined as follows:

    typedef PyObject *(*extended_call_ptr)(PyObject *callable, PyObject** args,
                                           int positional_argcount, PyTupleObject* kwnames);

    typedef struct {
        PyObject_HEAD
        extended_call_ptr ext_call;
    } PyExtendedCallable;

Any class that sets the Py_TPFLAGS_EXTENDED_CALL cannot be used as a
base class, and a TypeError will be raised if any Python code tries to
use it as a base class.

Py_TPFLAGS_FUNCTION_DESCRIPTOR

If this flag is set for a built-in class F, then instances of that class
are expected to behave the same as a Python function when used as a
class attribute. Specifically, this means that the value of c.m, where
C.m is an instance of the built-in class F (and c is an instance of C),
must be a bound-method binding C.m and c.
Without this flag, it would be impossible for custom callables to behave
like Python functions and be as efficient as Python or built-in
functions.

Changes to existing C structs

The function, method_descriptor and method classes will have their
corresponding structs changed to start with the PyExtendedCallable
struct.

Third-party built-in classes using the new extended call interface

To enable call performance on a par with Python functions and built-in
functions, third-party callables should set the Py_TPFLAGS_EXTENDED_CALL
bit of tp_flags and ensure that the corresponding C struct starts with
the PyExtendedCallable struct. Any built-in class that has the
Py_TPFLAGS_EXTENDED_CALL bit set must also implement the tp_call
function and make sure its behaviour is consistent with the ext_call
function.

Performance implications of these changes

Adding a function pointer to each callable, rather than each class of
callable, enables the choice of dispatching function (the code to
shuffle arguments about and do error checking) to be made when the
callable object is created rather than when it is called. This should
reduce the number of instructions executed between the call-site in the
interpreter and the execution of the callee.

Alternative Suggestions

PEP 580 is an alternative approach to solving the same problem as this
PEP.

Reference implementation

A draft implementation can be found at
https://github.com/markshannon/cpython/tree/pep-576-minimal

Copyright

This document has been placed in the public domain.

PEP: 3130
Title: Access to Current Module/Class/Function
Author: Jim J. Jewett
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 22-Apr-2007
Python-Version: 3.0
Post-History: 22-Apr-2007

Rejection Notice

This PEP is rejected. It is not clear how it should be implemented or
what the precise semantics should be in edge cases, and there aren't
enough important use cases given. Response has been lukewarm at best.

Abstract

It is common to need a reference to the current module, class, or
function, but there is currently no entirely correct way to do this.
This PEP proposes adding the keywords __module__, __class__, and
__function__.

Rationale for __module__

Many modules export various functions, classes, and other objects, but
will perform additional activities (such as running unit tests) when run
as a script. The current idiom is to test whether the module's name has
been set to a magic value.

    if __name__ == "__main__": ...

More complicated introspection requires a module to (attempt to) import
itself. If importing the expected name actually produces a different
module, there is no good workaround.

    # __import__ lets you use a variable, but... it gets more
    # complicated if the module is in a package.
    __import__(__name__)

    # So just go to sys modules... and hope that the module wasn't
    # hidden/removed (perhaps for security), that __name__ wasn't
    # changed, and definitely hope that no other module with the
    # same name is now available.
    class X(object):
        pass

    import sys
    mod = sys.modules[__name__]
    mod = sys.modules[X.__module__]

Proposal: Add a __module__ keyword which refers to the module currently
being defined (executed). (But see open issues.)

    # XXX sys.main is still changing as draft progresses. May
    # really need sys.modules[sys.main]
    if __module__ is sys.main:    # assumes PEP (3122), Cannon
        ...

Rationale for __class__

Class methods are passed the current instance; from this they can
determine self.__class__ (or cls, for class methods). Unfortunately,
this reference is to the object's actual class, which may be a subclass
of the defining class. The current workaround is to repeat the name of
the class, and assume that the name will not be rebound.

    class C(B):

        def meth(self):
            super(C, self).meth()  # Hope C is never rebound.

    class D(C):

        def meth(self):
            # ?!? issubclass(D,C), so it "works":
            super(C, self).meth()

Proposal: Add a __class__ keyword which refers to the class currently
being defined (executed). (But see open issues.)

    class C(B):
        def meth(self):
            super(__class__, self).meth()

Note that super calls may be further simplified by the "New Super" PEP
(Spealman). The __class__ (or __this_class__) attribute came up in
attempts to simplify the explanation and/or implementation of that PEP,
but was separated out as an independent decision.

Note that __class__ (or __this_class__) is not quite the same as the
__thisclass__ property on bound super objects. The existing
super.__thisclass__ property refers to the class from which the Method
Resolution Order search begins. In the above class D, it would refer to
(the current referent of the name) C.

Rationale for __function__

Functions (including methods) often want access to themselves, usually
for a private storage location or true recursion. While there are
several workarounds, all have their drawbacks.

    def counter(_total=[0]):
        # _total shouldn't really appear in the
        # signature at all; the list wrapping and
        # [0] unwrapping obscure the code
        _total[0] += 1
        return _total[0]

    @annotate(total=0)
    def counter():
        # Assume name counter is never rebound:
        counter.total += 1
        return counter.total

    # class exists only to provide storage:
    class _wrap(object):

        __total = 0

        def f(self):
            self.__total += 1
            return self.__total

    # set module attribute to a bound method:
    accum = _wrap().f

    # This function calls "factorial", which should be itself --
    # but the same programming styles that use heavy recursion
    # often have a greater willingness to rebind function names.
    def factorial(n):
        return (n * factorial(n-1) if n else 1)

Proposal: Add a __function__ keyword which refers to the function (or
method) currently being defined (executed). (But see open issues.)

    @annotate(total=0)
    def counter():
        # Always refers to this function obj:
        __function__.total += 1
        return __function__.total

    def factorial(n):
        return (n * __function__(n-1) if n else 1)

Backwards Compatibility

While a user could be using these names already, double-underscore names
(__anything__) are explicitly reserved to the interpreter.
It is therefore acceptable to introduce special meaning to these names
within a single feature release.

Implementation

Ideally, these names would be keywords treated specially by the bytecode
compiler.

Guido has suggested[1] using a cell variable filled in by the metaclass.

Michele Simionato has provided a prototype using bytecode hacks[2]. This
does not require any new bytecode operators; it just modifies which
specific sequence of existing operators gets run.

Open Issues

- Are __module__, __class__, and __function__ the right names? In
  particular, should the names include the word "this", either as
  __this_module__, __this_class__, and __this_function__, (format
  discussed on the python-3000 and python-ideas lists) or as
  __thismodule__, __thisclass__, and __thisfunction__ (inspired by,
  but conflicting with, current usage of super.__thisclass__).
- Are all three keywords needed, or should this enhancement be limited
  to a subset of the objects? Should methods be treated separately
  from other functions?

References

[1] Fixing super anyone? Guido van Rossum
https://mail.python.org/pipermail/python-3000/2007-April/006671.html

[2] Descriptor/Decorator challenge, Michele Simionato
http://groups.google.com/group/comp.lang.python/browse_frm/thread/a6010c7494871bb1/62a2da68961caeb6?lnk=gst&q=simionato+challenge&rnum=1&hl=en#62a2da68961caeb6

Copyright

This document has been placed in the public domain.

PEP: 429
Title: Python 3.4 Release Schedule
Author: Larry Hastings
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 17-Oct-2012
Python-Version: 3.4

Abstract

This document describes the development and release schedule for Python
3.4. The schedule primarily concerns itself with PEP-sized items.

Release Manager and Crew

- 3.4 Release Manager: Larry Hastings
- Windows installers: Martin v. Löwis
- Mac installers: Ned Deily
- Documentation: Georg Brandl

Release Schedule

Python 3.4 has now reached its end-of-life and has been retired.
No more releases will be made.

These are all the historical releases of Python 3.4, including their
release dates.

- 3.4.0 alpha 1: August 3, 2013
- 3.4.0 alpha 2: September 9, 2013
- 3.4.0 alpha 3: September 29, 2013
- 3.4.0 alpha 4: October 20, 2013
- 3.4.0 beta 1: November 24, 2013
- 3.4.0 beta 2: January 5, 2014
- 3.4.0 beta 3: January 26, 2014
- 3.4.0 candidate 1: February 10, 2014
- 3.4.0 candidate 2: February 23, 2014
- 3.4.0 candidate 3: March 9, 2014
- 3.4.0 final: March 16, 2014
- 3.4.1 candidate 1: May 5, 2014
- 3.4.1 final: May 18, 2014
- 3.4.2 candidate 1: September 22, 2014
- 3.4.2 final: October 6, 2014
- 3.4.3 candidate 1: February 8, 2015
- 3.4.3 final: February 25, 2015
- 3.4.4 candidate 1: December 6, 2015
- 3.4.4 final: December 20, 2015
- 3.4.5 candidate 1: June 12, 2016
- 3.4.5 final: June 26, 2016
- 3.4.6 candidate 1: January 2, 2017
- 3.4.6 final: January 17, 2017
- 3.4.7 candidate 1: July 25, 2017
- 3.4.7 final: August 9, 2017
- 3.4.8 candidate 1: January 23, 2018
- 3.4.8 final: February 4, 2018
- 3.4.9 candidate 1: July 19, 2018
- 3.4.9 final: August 2, 2018
- 3.4.10 candidate 1: March 4, 2019
- 3.4.10 final: March 18, 2019

Features for 3.4

Implemented / Final PEPs:

- PEP 428, a "pathlib" module providing object-oriented filesystem
  paths
- PEP 435, a standardized "enum" module
- PEP 436, a build enhancement that will help generate introspection
  information for builtins
- PEP 442, improved semantics for object finalization
- PEP 443, adding single-dispatch generic functions to the standard
  library
- PEP 445, a new C API for implementing custom memory allocators
- PEP 446, changing file descriptors to not be inherited by default in
  subprocesses
- PEP 450, a new "statistics" module
- PEP 451, standardizing module metadata for Python's module import
  system
- PEP 453, a bundled installer for the pip package manager
- PEP 454, a new "tracemalloc" module for tracing Python memory
  allocations
- PEP 456, a new hash algorithm for Python strings and binary data
- PEP 3154, a new and improved protocol for pickled objects
- PEP 3156, a new "asyncio" module, a new framework for asynchronous
  I/O

Deferred to post-3.4:

- PEP 431, improved support for time zone databases
- PEP 441, improved Python zip application support
- PEP 447, support for __locallookup__ metaclass method
- PEP 448, additional unpacking generalizations
- PEP 455, key transforming dictionary

Copyright

This document has been placed in the public domain.

PEP: 336
Title: Make None Callable
Author: Andrew McClelland
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Oct-2004

Abstract

None should be a callable object that when called with any arguments has
no side effect and returns None.
BDFL Pronouncement

This PEP is rejected. It is considered a feature that None raises an
error when called. The proposal falls short in tests for obviousness,
clarity, explicitness, and necessity. The provided Switch example is
nice but easily handled by a simple lambda definition. See python-dev
discussion on 17 June 2005[1].

Motivation

To allow a programming style for selectable actions that is more in
accordance with the minimalistic functional programming goals of the
Python language.

Rationale

Allow the use of None in method tables as a universal no-effect entry,
rather than either (1) checking a method table entry against None before
calling, or (2) writing a local no-effect method with arguments similar
to other functions in the table.

The semantics would be effectively:

    class None:

        def __call__(self, *args):
            pass

How To Use

Before, checking function table entry against None:

    class Select:

        def a(self, input):
            print 'a'

        def b(self, input):
            print 'b'

        def c(self, input):
            print 'c'

        def __call__(self, input):
            function = { 1 : self.a,
                         2 : self.b,
                         3 : self.c
                       }.get(input, None)
            if function: return function(input)

Before, using a local no effect method:

    class Select:

        def a(self, input):
            print 'a'

        def b(self, input):
            print 'b'

        def c(self, input):
            print 'c'

        def nop(self, input):
            pass

        def __call__(self, input):
            return { 1 : self.a,
                     2 : self.b,
                     3 : self.c
                   }.get(input, self.nop)(input)

After:

    class Select:

        def a(self, input):
            print 'a'

        def b(self, input):
            print 'b'

        def c(self, input):
            print 'c'

        def __call__(self, input):
            return { 1 : self.a,
                     2 : self.b,
                     3 : self.c
                   }.get(input, None)(input)
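For comparison, the lambda alternative mentioned in the BDFL
pronouncement (a sketch, not part of the original proposal) achieves the
same effect without changing None:

    class Select:

        # a, b, c as above...

        def __call__(self, input):
            return { 1 : self.a,
                     2 : self.b,
                     3 : self.c
                   }.get(input, lambda input: None)(input)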
References

[1] Raymond Hettinger, Propose to reject PEP 336 -- Make None Callable
https://mail.python.org/pipermail/python-dev/2005-June/054280.html

Copyright

This document has been placed in the public domain.

PEP: 216
Title: Docstring Format
Author: Moshe Zadka
Status: Withdrawn
Type: Informational
Created: 31-Jul-2000
Superseded-By: 287

It has been superseded by PEP 287.

Abstract

Named Python objects, such as modules, classes and functions, have a
string attribute called __doc__. If the first expression inside the
definition is a literal string, that string is assigned to the __doc__
attribute.

The __doc__ attribute is called a documentation string, or docstring. It
is often used to summarize the interface of the module, class or
function. However, since there is no common format for documentation
strings, tools for extracting docstrings and transforming them into
documentation in a standard format (e.g., DocBook) have not sprung up in
abundance, and those that do exist are for the most part unmaintained
and unused.

Perl Documentation

In Perl, most modules are documented in a format called POD -- Plain Old
Documentation. This is an easy-to-type, very low level format which
integrates well with the Perl parser. Many tools exist to turn POD
documentation into other formats: info, HTML and man pages, among
others. However, in Perl, the information is not available at run-time.

Java Documentation

In Java, special comments before classes and functions serve to
document the code. A program to extract these, and turn them into HTML
documentation, is called javadoc, and is part of the standard Java
distribution. However, the only output format that is supported is HTML,
and JavaDoc has a very intimate relationship with HTML.

Python Docstring Goals

Python documentation strings are easy to spot during parsing, and are
also available to the runtime interpreter. This double purpose is a bit
problematic, sometimes: for example, some are reluctant to have overly
long docstrings, because they do not want them to take up much space in
the runtime. In addition, because of the current lack of tools, people
read objects' docstrings by "print"ing them, so a tendency to make them
brief and free of markup has sprung up. This tendency hinders writing
better documentation-extraction tools, since it causes docstrings to
contain little information, which is hard to parse.

High Level Solutions

To counter the objection that the strings take up space in the running
program, it is suggested that documentation extraction tools will
concatenate a maximum prefix of string literals which appear in the
beginning of a definition. The first of these will also be available in
the interactive interpreter, so it should contain a few summary lines.

Docstring Format Goals

These are the goals for the docstring format, as discussed ad nauseam in
the doc-sig.

1. It must be easy to type with any standard text editor.
2. It must be readable to the casual observer.
3. It must not contain information which can be deduced from parsing
   the module.
4. It must contain sufficient information so it can be converted to any
   reasonable markup format.
5. It must be possible to write a module's entire documentation in
   docstrings, without feeling hampered by the markup language.

Docstring Contents

For requirement 5 above, we need to specify what must be in docstrings.

At least the following must be available:

a. A tag that means "this is a Python something, guess what"

   Example: In the sentence "The POP3 class", we need to markup "POP3"
   so. The parser will be able to guess it is a class from the contents
   of the poplib module, but we need to make it guess.

b. Tags that mean "this is a Python class/module/class var/instance
   var..."

   Example: The usual Python idiom for singleton class A is to have _A
   as the class, and A a function which returns _A objects. It's usual
   to document the class, nonetheless, as being A. This requires the
   strength to say "The class A" and have A hyperlinked and marked-up
   as a class.

c. An easy way to include Python source code/Python interactive
   sessions

d. Emphasis/bold

e. List/tables

Docstring Basic Structure

The documentation strings will be in StructuredTextNG
(http://www.zope.org/Members/jim/StructuredTextWiki/StructuredTextNG)
Since StructuredText is not yet strong enough to handle (a) and (b)
above, we will need to extend it. I suggest using
[<description>:python identifier]. E.g.: [class:POP3],
[:POP3.list], etc. If the description is missing, a guess will be made
from the text.
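A docstring using the proposed markup might then look like this (an
illustrative sketch only; the method and the [:POP3.getline] helper are
hypothetical):

    def list(self, which=None):
        """Request a message listing from the [class:POP3] server.

        Delegates line reading to the hypothetical helper
        [:POP3.getline].
        """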
Unresolved Issues

Is there a way to escape characters in ST? If so, how? (example: * at
the beginning of a line without being a bullet symbol)

Is my suggestion above for Python symbols compatible with ST-NG? How
hard would it be to extend ST-NG to support it?

How do we describe input and output types of functions?

What additional constraints do we enforce on each docstring
(module/class/function)?

What are the guesser rules?

Rejected Suggestions

XML -- it's very hard to type, and too cluttered to read comfortably.

PEP: 498
Title: Literal String Interpolation
Author: Eric V. Smith
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 01-Aug-2015
Python-Version: 3.6
Post-History: 07-Aug-2015, 30-Aug-2015, 04-Sep-2015, 19-Sep-2015, 06-Nov-2016
Resolution: https://mail.python.org/pipermail/python-dev/2015-September/141526.html

Abstract

Python supports multiple ways to format text strings. These include
%-formatting[1], str.format()[2], and string.Template[3]. Each of these
methods has its advantages, but in addition has disadvantages that make
them cumbersome to use in practice. This PEP proposes to add a new
string formatting mechanism: Literal String Interpolation. In this PEP,
such strings will be referred to as "f-strings", taken from the leading
character used to denote such strings, and standing for "formatted
strings".

This PEP does not propose to remove or deprecate any of the existing
string formatting mechanisms.

F-strings provide a way to embed expressions inside string literals,
using a minimal syntax. It should be noted that an f-string is really an
expression evaluated at run time, not a constant value. In Python source
code, an f-string is a literal string, prefixed with 'f', which contains
expressions inside braces. The expressions are replaced with their
values. Some examples are:

    >>> import datetime
    >>> name = 'Fred'
    >>> age = 50
    >>> anniversary = datetime.date(1991, 10, 12)
    >>> f'My name is {name}, my age next year is {age+1}, my anniversary is {anniversary:%A, %B %d, %Y}.'
    'My name is Fred, my age next year is 51, my anniversary is Saturday, October 12, 1991.'
    >>> f'He said his name is {name!r}.'
    "He said his name is 'Fred'."

A similar feature was proposed in PEP 215. PEP 215 proposed to support a
subset of Python expressions, and did not support the type-specific
string formatting (the __format__() method) which was introduced with
PEP 3101.

Rationale

This PEP is driven by the desire to have a simpler way to format strings
in Python. The existing ways of formatting are either error prone,
inflexible, or cumbersome.

%-formatting is limited as to the types it supports. Only ints, strs,
and doubles can be formatted. All other types are either not supported,
or converted to one of these types before formatting.
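For example, a datetime value can only pass through str() here; there is
no way to ask %-formatting for a different rendering of it:

    >>> import datetime
    >>> '%s' % datetime.date(1991, 10, 12)   # str() only; no format control
    '1991-10-12'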
In addition, there's a well-known trap where a single value is passed:

    >>> msg = 'disk failure'
    >>> 'error: %s' % msg
    'error: disk failure'

But if msg were ever to be a tuple, the same code would fail:

    >>> msg = ('disk failure', 32)
    >>> 'error: %s' % msg
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: not all arguments converted during string formatting

To be defensive, the following code should be used:

    >>> 'error: %s' % (msg,)
    "error: ('disk failure', 32)"

str.format() was added to address some of these problems with
%-formatting. In particular, it uses normal function call syntax (and
therefore supports multiple parameters) and it is extensible through the
__format__() method on the object being converted to a string. See PEP
3101 for a detailed rationale. This PEP reuses much of the str.format()
syntax and machinery, in order to provide continuity with an existing
Python string formatting mechanism.

However, str.format() is not without its issues. Chief among them is its
verbosity. For example, the text value is repeated here:

    >>> value = 4 * 20
    >>> 'The value is {value}.'.format(value=value)
    'The value is 80.'

Even in its simplest form there is a bit of boilerplate, and the value
that's inserted into the placeholder is sometimes far removed from where
the placeholder is situated:

    >>> 'The value is {}.'.format(value)
    'The value is 80.'

With an f-string, this becomes:

    >>> f'The value is {value}.'
    'The value is 80.'

F-strings provide a concise, readable way to include the value of Python
expressions inside strings.

In this sense, string.Template and %-formatting have similar
shortcomings to str.format(), but also support fewer formatting options.
In particular, they do not support the __format__ protocol, so that
there is no way to control how a specific object is converted to a
string, nor can it be extended to additional types that want to control
how they are converted to strings (such as Decimal and datetime). This
example is not possible with string.Template:

    >>> value = 1234
    >>> f'input={value:#06x}'
    'input=0x04d2'

And neither %-formatting nor string.Template can control formatting such
as:

    >>> date = datetime.date(1991, 10, 12)
    >>> f'{date} was on a {date:%A}'
    '1991-10-12 was on a Saturday'

No use of globals() or locals()

In the discussions on python-dev[4], a number of solutions were
presented that used locals() and globals() or their equivalents. All of
these have various problems. Among these are referencing variables that
are not otherwise used in a closure. Consider:

    >>> def outer(x):
    ...     def inner():
    ...         return 'x={x}'.format_map(locals())
    ...     return inner
    ...
    >>> outer(42)()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "<stdin>", line 3, in inner
    KeyError: 'x'

This returns an error because the compiler has not added a reference to
x inside the closure. You need to manually add a reference to x in order
for this to work:

    >>> def outer(x):
    ...     def inner():
    ...         x
    ...         return 'x={x}'.format_map(locals())
return inner\n ...\n >>> outer(42)()\n 'x=42'\n\nIn addition, using locals() or globals() introduces an information leak.\nA called routine that has access to the callers locals() or globals()\nhas access to far more information than needed to do the string\ninterpolation.\n\nGuido stated[5] that any solution to better string interpolation would\nnot use locals() or globals() in its implementation. (This does not\nforbid users from passing locals() or globals() in, it just doesn't\nrequire it, nor does it allow using these functions under the hood.)\n\nSpecification\n\nIn source code, f-strings are string literals that are prefixed by the\nletter 'f' or 'F'. Everywhere this PEP uses 'f', 'F' may also be used.\n'f' may be combined with 'r' or 'R', in either order, to produce raw\nf-string literals. 'f' may not be combined with 'b': this PEP does not\npropose to add binary f-strings. 'f' may not be combined with 'u'.\n\nWhen tokenizing source files, f-strings use the same rules as normal\nstrings, raw strings, binary strings, and triple quoted strings. That\nis, the string must end with the same character that it started with: if\nit starts with a single quote it must end with a single quote, etc. This\nimplies that any code that currently scans Python code looking for\nstrings should be trivially modifiable to recognize f-strings (parsing\nwithin an f-string is another matter, of course).\n\nOnce tokenized, f-strings are parsed in to literal strings and\nexpressions. Expressions appear within curly braces '{' and '}'. While\nscanning the string for expressions, any doubled braces '{{' or '}}'\ninside literal portions of an f-string are replaced by the corresponding\nsingle brace. Doubled literal opening braces do not signify the start of\nan expression. A single closing curly brace '}' in the literal portion\nof a string is an error: literal closing curly braces must be doubled\n'}}' in order to represent a single closing brace.\n\nThe parts of the f-string outside of braces are literal strings. These\nliteral portions are then decoded. For non-raw f-strings, this includes\nconverting backslash escapes such as '\\n', '\\\"', \"\\'\", '\\xhh', '\\uxxxx',\n'\\Uxxxxxxxx', and named unicode characters '\\N{name}' into their\nassociated Unicode characters[6].\n\nBackslashes may not appear anywhere within expressions. Comments, using\nthe '#' character, are not allowed inside an expression.\n\nFollowing each expression, an optional type conversion may be specified.\nThe allowed conversions are '!s', '!r', or '!a'. These are treated the\nsame as in str.format(): '!s' calls str() on the expression, '!r' calls\nrepr() on the expression, and '!a' calls ascii() on the expression.\nThese conversions are applied before the call to format(). The only\nreason to use '!s' is if you want to specify a format specifier that\napplies to str, not to the type of the expression.\n\nF-strings use the same format specifier mini-language as str.format.\nSimilar to str.format(), optional format specifiers maybe be included\ninside the f-string, separated from the expression (or the type\nconversion, if specified) by a colon. If a format specifier is not\nprovided, an empty string is used.\n\nSo, an f-string looks like:\n\n f ' { } ... '\n\nThe expression is then formatted using the __format__ protocol, using\nthe format specifier as an argument. The resulting value is used when\nbuilding the value of the f-string.\n\nNote that __format__() is not called directly on each value. 
The actual\ncode uses the equivalent of type(value).__format__(value, format_spec),\nor format(value, format_spec). See the documentation of the builtin\nformat() function for more details.\n\nExpressions cannot contain ':' or '!' outside of strings or parentheses,\nbrackets, or braces. The exception is that the '!=' operator is allowed\nas a special case.\n\nEscape sequences\n\nBackslashes may not appear inside the expression portions of f-strings,\nso you cannot use them, for example, to escape quotes inside f-strings:\n\n >>> f'{\\'quoted string\\'}'\n File \"\", line 1\n SyntaxError: f-string expression part cannot include a backslash\n\nYou can use a different type of quote inside the expression:\n\n >>> f'{\"quoted string\"}'\n 'quoted string'\n\nBackslash escapes may appear inside the string portions of an f-string.\n\nNote that the correct way to have a literal brace appear in the\nresulting string value is to double the brace:\n\n >>> f'{{ {4*10} }}'\n '{ 40 }'\n >>> f'{{{4*10}}}'\n '{40}'\n\nLike all raw strings in Python, no escape processing is done for raw\nf-strings:\n\n >>> fr'x={4*10}\\n'\n 'x=40\\\\n'\n\nDue to Python's string tokenizing rules, the f-string\nf'abc {a['x']} def' is invalid. The tokenizer parses this as 3 tokens:\nf'abc {a[', x, and ']} def'. Just like regular strings, this cannot be\nfixed by using raw strings. There are a number of correct ways to write\nthis f-string: with a different quote character:\n\n f\"abc {a['x']} def\"\n\nOr with triple quotes:\n\n f'''abc {a['x']} def'''\n\nCode equivalence\n\nThe exact code used to implement f-strings is not specified. However, it\nis guaranteed that any embedded value that is converted to a string will\nuse that value's __format__ method. This is the same mechanism that\nstr.format() uses to convert values to strings.\n\nFor example, this code:\n\n f'abc{expr1:spec1}{expr2!r:spec2}def{expr3}ghi'\n\nMight be evaluated as:\n\n 'abc' + format(expr1, spec1) + format(repr(expr2), spec2) + 'def' + format(expr3) + 'ghi'\n\nExpression evaluation\n\nThe expressions that are extracted from the string are evaluated in the\ncontext where the f-string appeared. This means the expression has full\naccess to local and global variables. Any valid Python expression can be\nused, including function and method calls.\n\nBecause the f-strings are evaluated where the string appears in the\nsource code, there is no additional expressiveness available with\nf-strings. There are also no additional security concerns: you could\nhave also just written the same expression, not inside of an f-string:\n\n >>> def foo():\n ... return 20\n ...\n >>> f'result={foo()}'\n 'result=20'\n\nIs equivalent to:\n\n >>> 'result=' + str(foo())\n 'result=20'\n\nExpressions are parsed with the equivalent of\nast.parse('(' + expression + ')', '', 'eval')[7].\n\nNote that since the expression is enclosed by implicit parentheses\nbefore evaluation, expressions can contain newlines. For example:\n\n >>> x = 0\n >>> f'''{x\n ... +1}'''\n '1'\n\n >>> d = {0: 'zero'}\n >>> f'''{d[0\n ... ]}'''\n 'zero'\n\nFormat specifiers\n\nFormat specifiers may also contain evaluated expressions. This allows\ncode such as:\n\n >>> width = 10\n >>> precision = 4\n >>> value = decimal.Decimal('12.34567')\n >>> f'result: {value:{width}.{precision}}'\n 'result: 12.35'\n\nOnce expressions in a format specifier are evaluated (if necessary),\nformat specifiers are not interpreted by the f-string evaluator. 
Just as\nin str.format(), they are merely passed in to the __format__() method of\nthe object being formatted.\n\nConcatenating strings\n\nAdjacent f-strings and regular strings are concatenated. Regular strings\nare concatenated at compile time, and f-strings are concatenated at run\ntime. For example, the expression:\n\n >>> x = 10\n >>> y = 'hi'\n >>> 'a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e'\n\nyields the value:\n\n 'ab10{c}str< hi >de'\n\nWhile the exact method of this run time concatenation is unspecified,\nthe above code might evaluate to:\n\n 'ab' + format(x) + '{c}' + 'str<' + format(y, '^4') + '>de'\n\nEach f-string is entirely evaluated before being concatenated to\nadjacent f-strings. That means that this:\n\n >>> f'{x' f'}'\n\nIs a syntax error, because the first f-string does not contain a closing\nbrace.\n\nError handling\n\nEither compile time or run time errors can occur when processing\nf-strings. Compile time errors are limited to those errors that can be\ndetected when scanning an f-string. These errors all raise SyntaxError.\n\nUnmatched braces:\n\n >>> f'x={x'\n File \"\", line 1\n SyntaxError: f-string: expecting '}'\n\nInvalid expressions:\n\n >>> f'x={!x}'\n File \"\", line 1\n SyntaxError: f-string: empty expression not allowed\n\nRun time errors occur when evaluating the expressions inside an\nf-string. Note that an f-string can be evaluated multiple times, and\nwork sometimes and raise an error at other times:\n\n >>> d = {0:10, 1:20}\n >>> for i in range(3):\n ... print(f'{i}:{d[i]}')\n ...\n 0:10\n 1:20\n Traceback (most recent call last):\n File \"\", line 2, in \n KeyError: 2\n\nor:\n\n >>> for x in (32, 100, 'fifty'):\n ... print(f'x = {x:+3}')\n ...\n 'x = +32'\n 'x = +100'\n Traceback (most recent call last):\n File \"\", line 2, in \n ValueError: Sign not allowed in string format specifier\n\nLeading and trailing whitespace in expressions is ignored\n\nFor ease of readability, leading and trailing whitespace in expressions\nis ignored. This is a by-product of enclosing the expression in\nparentheses before evaluation.\n\nEvaluation order of expressions\n\nThe expressions in an f-string are evaluated in left-to-right order.\nThis is detectable only if the expressions have side effects:\n\n >>> def fn(l, incr):\n ... result = l[0]\n ... l[0] += incr\n ... return result\n ...\n >>> lst = [0]\n >>> f'{fn(lst,2)} {fn(lst,3)}'\n '0 2'\n >>> f'{fn(lst,2)} {fn(lst,3)}'\n '5 7'\n >>> lst\n [10]\n\nDiscussion\n\npython-ideas discussion\n\nMost of the discussions on python-ideas[8] focused on three issues:\n\n- How to denote f-strings,\n- How to specify the location of expressions in f-strings, and\n- Whether to allow full Python expressions.\n\nHow to denote f-strings\n\nBecause the compiler must be involved in evaluating the expressions\ncontained in the interpolated strings, there must be some way to denote\nto the compiler which strings should be evaluated. This PEP chose a\nleading 'f' character preceding the string literal. This is similar to\nhow 'b' and 'r' prefixes change the meaning of the string itself, at\ncompile time. Other prefixes were suggested, such as 'i'. No option\nseemed better than the other, so 'f' was chosen.\n\nAnother option was to support special functions, known to the compiler,\nsuch as Format(). 
This seems like too much magic for Python: not only is\nthere a chance for collision with existing identifiers, the PEP author\nfeels that it's better to signify the magic with a string prefix\ncharacter.\n\nHow to specify the location of expressions in f-strings\n\nThis PEP supports the same syntax as str.format() for distinguishing\nreplacement text inside strings: expressions are contained inside\nbraces. There were other options suggested, such as string.Template's\n$identifier or ${expression}.\n\nWhile $identifier is no doubt more familiar to shell scripters and users\nof some other languages, in Python str.format() is heavily used. A quick\nsearch of Python's standard library shows only a handful of uses of\nstring.Template, but hundreds of uses of str.format().\n\nAnother proposed alternative was to have the substituted text between \\{\nand } or between \\{ and \\}. While this syntax would probably be\ndesirable if all string literals were to support interpolation, this PEP\nonly supports strings that are already marked with the leading 'f'. As\nsuch, the PEP is using unadorned braces to denoted substituted text, in\norder to leverage end user familiarity with str.format().\n\nSupporting full Python expressions\n\nMany people on the python-ideas discussion wanted support for either\nonly single identifiers, or a limited subset of Python expressions (such\nas the subset supported by str.format()). This PEP supports full Python\nexpressions inside the braces. Without full expressions, some desirable\nusage would be cumbersome. For example:\n\n >>> f'Column={col_idx+1}'\n >>> f'number of items: {len(items)}'\n\nwould become:\n\n >>> col_number = col_idx+1\n >>> f'Column={col_number}'\n >>> n_items = len(items)\n >>> f'number of items: {n_items}'\n\nWhile it's true that very ugly expressions could be included in the\nf-strings, this PEP takes the position that such uses should be\naddressed in a linter or code review:\n\n >>> f'mapping is { {a:b for (a, b) in ((1, 2), (3, 4))} }'\n 'mapping is {1: 2, 3: 4}'\n\nSimilar support in other languages\n\nWikipedia has a good discussion of string interpolation in other\nprogramming languages[9]. This feature is implemented in many languages,\nwith a variety of syntaxes and restrictions.\n\nDifferences between f-string and str.format expressions\n\nThere is one small difference between the limited expressions allowed in\nstr.format() and the full expressions allowed inside f-strings. The\ndifference is in how index lookups are performed. In str.format(), index\nvalues that do not look like numbers are converted to strings:\n\n >>> d = {'a': 10, 'b': 20}\n >>> 'a={d[a]}'.format(d=d)\n 'a=10'\n\nNotice that the index value is converted to the string 'a' when it is\nlooked up in the dict.\n\nHowever, in f-strings, you would need to use a literal for the value of\n'a':\n\n >>> f'a={d[\"a\"]}'\n 'a=10'\n\nThis difference is required because otherwise you would not be able to\nuse variables as index values:\n\n >>> a = 'b'\n >>> f'a={d[a]}'\n 'a=20'\n\nSee[10] for a further discussion. It was this observation that led to\nfull Python expressions being supported in f-strings.\n\nFurthermore, the limited expressions that str.format() understands need\nnot be valid Python expressions. For example:\n\n >>> '{i[\";]}'.format(i={'\";':4})\n '4'\n\nFor this reason, the str.format() \"expression parser\" is not suitable\nfor use when implementing f-strings.\n\nTriple-quoted f-strings\n\nTriple quoted f-strings are allowed. 
These strings are parsed just as\nnormal triple-quoted strings are. After parsing and decoding, the normal\nf-string logic is applied, and __format__() is called on each value.\n\nRaw f-strings\n\nRaw and f-strings may be combined. For example, they could be used to\nbuild up regular expressions:\n\n >>> header = 'Subject'\n >>> fr'{header}:\\s+'\n 'Subject:\\\\s+'\n\nIn addition, raw f-strings may be combined with triple-quoted strings.\n\nNo binary f-strings\n\nFor the same reason that we don't support bytes.format(), you may not\ncombine 'f' with 'b' string literals. The primary problem is that an\nobject's __format__() method may return Unicode data that is not\ncompatible with a bytes string.\n\nBinary f-strings would first require a solution for bytes.format(). This\nidea has been proposed in the past, most recently in\n461#proposed-variations. The discussions of such a feature usually\nsuggest either\n\n- adding a method such as __bformat__() so an object can control how\n it is converted to bytes, or\n- having bytes.format() not be as general purpose or extensible as\n str.format().\n\nBoth of these remain as options in the future, if such functionality is\ndesired.\n\n!s, !r, and !a are redundant\n\nThe !s, !r, and !a conversions are not strictly required. Because\narbitrary expressions are allowed inside the f-strings, this code:\n\n >>> a = 'some string'\n >>> f'{a!r}'\n \"'some string'\"\n\nIs identical to:\n\n >>> f'{repr(a)}'\n \"'some string'\"\n\nSimilarly, !s can be replaced by calls to str() and !a by calls to\nascii().\n\nHowever, !s, !r, and !a are supported by this PEP in order to minimize\nthe differences with str.format(). !s, !r, and !a are required in\nstr.format() because it does not allow the execution of arbitrary\nexpressions.\n\nLambdas inside expressions\n\nBecause lambdas use the ':' character, they cannot appear outside of\nparentheses in an expression. The colon is interpreted as the start of\nthe format specifier, which means the start of the lambda expression is\nseen and is syntactically invalid. As there's no practical use for a\nplain lambda in an f-string expression, this is not seen as much of a\nlimitation.\n\nIf you feel you must use lambdas, they may be used inside of\nparentheses:\n\n >>> f'{(lambda x: x*2)(3)}'\n '6'\n\nCan't combine with 'u'\n\nThe 'u' prefix was added to Python 3.3 in PEP 414 as a means to ease\nsource compatibility with Python 2.7. Because Python 2.7 will never\nsupport f-strings, there is nothing to be gained by being able to\ncombine the 'f' prefix with 'u'.\n\nExamples from Python's source code\n\nHere are some examples from Python source code that currently use\nstr.format(), and how they would look with f-strings. 
This PEP does not recommend wholesale converting to f-strings; these are
just examples of real-world usages of str.format() and how they'd look
if written from scratch using f-strings.

Lib/asyncio/locks.py:

    extra = '{},waiters:{}'.format(extra, len(self._waiters))
    extra = f'{extra},waiters:{len(self._waiters)}'

Lib/configparser.py:

    message.append(" [line {0:2d}]".format(lineno))
    message.append(f" [line {lineno:2d}]")

Tools/clinic/clinic.py:

    methoddef_name = "{}_METHODDEF".format(c_basename.upper())
    methoddef_name = f"{c_basename.upper()}_METHODDEF"

python-config.py:

    print("Usage: {0} [{1}]".format(sys.argv[0], '|'.join('--'+opt for opt in valid_opts)), file=sys.stderr)
    print(f"Usage: {sys.argv[0]} [{'|'.join('--'+opt for opt in valid_opts)}]", file=sys.stderr)

References

[1] %-formatting
(https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting)

[2] str.format
(https://docs.python.org/3/library/string.html#formatstrings)

[3] string.Template documentation
(https://docs.python.org/3/library/string.html#template-strings)

[4] Formatting using locals() and globals()
(https://mail.python.org/pipermail/python-ideas/2015-July/034671.html)

[5] Avoid locals() and globals()
(https://mail.python.org/pipermail/python-ideas/2015-July/034701.html)

[6] String literal description
(https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals)

[7] ast.parse() documentation
(https://docs.python.org/3/library/ast.html#ast.parse)

[8] Start of python-ideas discussion
(https://mail.python.org/pipermail/python-ideas/2015-July/034657.html)

[9] Wikipedia article on string interpolation
(https://en.wikipedia.org/wiki/String_interpolation)

[10] Differences in str.format() and f-string expressions
(https://mail.python.org/pipermail/python-ideas/2015-July/034726.html)

Copyright

This document has been placed in the public domain.

PEP: 531 Title: Existence checking operators Version: $Revision$
Last-Modified: $Date$ Author: Alyssa Coghlan Status: Withdrawn Type:
Standards Track Content-Type: text/x-rst Created: 25-Oct-2016
Python-Version: 3.7 Post-History: 28-Oct-2016

Abstract

Inspired by PEP 505 and the related discussions, this PEP proposes the
addition of two new control flow operators to Python:

- Existence-checking precondition ("exists-then"): expr1 ?then expr2
- Existence-checking fallback ("exists-else"): expr1 ?else expr2

as well as the following abbreviations for common existence checking
expressions and statements:

- Existence-checking attribute access: obj?.attr (for obj ?then obj.attr)
- Existence-checking subscripting: obj?[expr] (for obj ?then obj[expr])
- Existence-checking assignment: value ?= expr (for
  value = value ?else expr)
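To make the intended semantics concrete, here is a minimal sketch of the
fallback form, assuming the existence checking protocol described below;
exists() here is a stand-in for the proposed operator.exists, not a real
standard library function:

    import math

    def exists(obj):
        # Stand-in for the proposed operator.exists (rules given below).
        if obj is None or obj is NotImplemented or obj is Ellipsis:
            return False
        if isinstance(obj, float) and math.isnan(obj):
            return False
        return True  # zeroes and empty containers still "exist"

    def exists_else(lhs, fallback):
        # Rough equivalent of the proposed `lhs ?else fallback()`.
        return lhs if exists(lhs) else fallback()

    print(exists_else(0, lambda: 42))     # 0 (unlike `0 or 42`)
    print(exists_else(None, lambda: 42))  # 42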
The common ? symbol in these new operator definitions indicates that
they use a new "existence checking" protocol rather than the established
truth-checking protocol used by if statements, while loops,
comprehensions, generator expressions, conditional expressions, logical
conjunction, and logical disjunction.

This new protocol would be made available as operator.exists, with the
following characteristics:

- types can define a new __exists__ magic method (Python) or tp_exists
  slot (C) to override the default behaviour. This optional method has
  the same signature and possible return values as __bool__.
- operator.exists(None) returns False
- operator.exists(NotImplemented) returns False
- operator.exists(Ellipsis) returns False
- float, complex and decimal.Decimal will override the existence check
  such that NaN values return False and other values (including zero
  values) return True
- for any other type, operator.exists(obj) returns True by default.
  Most importantly, values that evaluate to False in a truth checking
  context (zeroes, empty containers) will still evaluate to True in an
  existence checking context

PEP Withdrawal

When posting this PEP for discussion on python-ideas[1], I asked
reviewers to consider 3 high level design questions before moving on to
considering the specifics of this particular syntactic proposal:

1. Do we collectively agree that "existence checking" is a useful
   general concept that exists in software development and is distinct
   from the concept of "truth checking"?
2. Do we collectively agree that the Python ecosystem would benefit
   from an existence checking protocol that permits generalisation of
   algorithms (especially short circuiting ones) across different "data
   missing" indicators, including those defined in the language
   definition, the standard library, and custom user code?
3. Do we collectively agree that it would be easier to use such a
   protocol effectively if existence-checking equivalents to the
   truth-checking "and" and "or" control flow operators were available?

While the answers to the first question were generally positive, it
quickly became clear that the answer to the second question is "No".

Steven D'Aprano articulated the counter-argument well in[2], but the
general idea is that when checking for "missing data" sentinels, we're
almost always looking for a specific sentinel value, rather than any
sentinel value.

NotImplemented exists, for example, due to None being a potentially
legitimate result from overloaded arithmetic operators and exception
handling imposing too much runtime overhead to be useful for operand
coercion.

Similarly, Ellipsis exists for multi-dimensional slicing support due to
None already having another meaning in a slicing context (indicating the
use of the default start or stop indices, or the default step size).

In mathematics, the value of NaN is that programmatically it behaves
like a normal value of its type (e.g.
exposing all the usual attributes\nand methods), while arithmetically it behaves according to the\nmathematical rules for handling NaN values.\n\nWith that core design concept invalidated, the proposal as a whole\ndoesn't make sense, and it is accordingly withdrawn.\n\nHowever, the discussion of the proposal did prompt consideration of a\npotential protocol based approach to make the existing and, or and\nif-else operators more flexible[3] without introducing any new syntax,\nso I'll be writing that up as another possible alternative to PEP 505.\n\nRelationship with other PEPs\n\nWhile this PEP was inspired by and builds on Mark Haase's excellent work\nin putting together PEP 505, it ultimately competes with that PEP due to\nsignificant differences in the specifics of the proposed syntax and\nsemantics for the feature.\n\nIt also presents a different perspective on the rationale for the change\nby focusing on the benefits to existing Python users as the typical\ndemands of application and service development activities are genuinely\nchanging. It isn't an accident that similar features are now appearing\nin multiple programming languages, and while it's a good idea for us to\nlearn from how other language designers are handling the problem,\nprecedents being set elsewhere are more relevant to how we would go\nabout tackling this problem than they are to whether or not we think\nit's a problem we should address in the first place.\n\nRationale\n\nExistence checking expressions\n\nAn increasingly common requirement in modern software development is the\nneed to work with \"semi-structured data\": data where the structure of\nthe data is known in advance, but pieces of it may be missing at\nruntime, and the software manipulating that data is expected to degrade\ngracefully (e.g. 
by omitting results that depend on the missing data)\nrather than failing outright.\n\nSome particularly common cases where this issue arises are:\n\n- handling optional application configuration settings and function\n parameters\n- handling external service failures in distributed systems\n- handling data sets that include some partial records\n\nIt is the latter two cases that are the primary motivation for this\nPEP - while needing to deal with optional configuration settings and\nparameters is a design requirement at least as old as Python itself, the\nrise of public cloud infrastructure, the development of software systems\nas collaborative networks of distributed services, and the availability\nof large public and private data sets for analysis means that the\nability to degrade operations gracefully in the face of partial service\nfailures or partial data availability is becoming an essential feature\nof modern programming environments.\n\nAt the moment, writing such software in Python can be genuinely awkward,\nas your code ends up littered with expressions like:\n\n- value1 = expr1.field.of.interest if expr1 is not None else None\n- value2 = expr2[\"field\"][\"of\"][\"interest\"] if expr2 is not None else None\n- value3 = expr3 if expr3 is not None else expr4 if expr4 is not None else expr5\n\nIf these are only occasional, then expanding out to full statement forms\nmay help improve readability, but if you have 4 or 5 of them in a row\n(which is a fairly common situation in data transformation pipelines),\nthen replacing them with 16 or 20 lines of conditional logic really\ndoesn't help matters.\n\nExpanding the three examples above that way hopefully helps illustrate\nthat:\n\n if expr1 is not None:\n value1 = expr1.field.of.interest\n else:\n value1 = None\n if expr2 is not None:\n value2 = expr2[\"field\"][\"of\"][\"interest\"]\n else:\n value2 = None\n if expr3 is not None:\n value3 = expr3\n else:\n if expr4 is not None:\n value3 = expr4\n else:\n value3 = expr5\n\nThe combined impact of the proposals in this PEP is to allow the above\nsample expressions to instead be written as:\n\n- value1 = expr1?.field.of.interest\n- value2 = expr2?[\"field\"][\"of\"][\"interest\"]\n- value3 = expr3 ?else expr4 ?else expr5\n\nIn these forms, almost all of the information presented to the reader is\nimmediately relevant to the question \"What does this code do?\", while\nthe boilerplate code to handle missing data by passing it through to the\noutput or falling back to an alternative input, has shrunk to two uses\nof the ? symbol and two uses of the ?else keyword.\n\nIn the first two examples, the 31 character boilerplate clause\nif exprN is not None else None (minimally 27 characters for a single\nletter variable name) has been replaced by a single ? 
character,\nsubstantially improving the signal-to-pattern-noise ratio of the lines\n(especially if it encourages the use of more meaningful variable and\nfield names rather than making them shorter purely for the sake of\nexpression brevity).\n\nIn the last example, two instances of the 21 character boilerplate,\nif exprN is not None (minimally 17 characters) are replaced with single\ncharacters, again substantially improving the signal-to-pattern-noise\nratio.\n\nFurthermore, each of our 5 \"subexpressions of potential interest\" is\nincluded exactly once, rather than 4 of them needing to be duplicated or\npulled out to a named variable in order to first check if they exist.\n\nThe existence checking precondition operator is mainly defined to\nprovide a clear conceptual basis for the existence checking attribute\naccess and subscripting operators:\n\n- obj?.attr is roughly equivalent to obj ?then obj.attr\n- obj?[expr] is roughly equivalent to obj ?then obj[expr]\n\nThe main semantic difference between the shorthand forms and their\nexpanded equivalents is that the common subexpression to the left of the\nexistence checking operator is evaluated only once in the shorthand form\n(similar to the benefit offered by augmented assignment statements).\n\nExistence checking assignment\n\nExistence-checking assignment is proposed as a relatively\nstraightforward expansion of the concepts in this PEP to also cover the\ncommon configuration handling idiom:\n\n- value = value if value is not None else expensive_default()\n\nby allowing that to instead be abbreviated as:\n\n- value ?= expensive_default()\n\nThis is mainly beneficial when the target is a subscript operation or\nsubattribute, as even without this specific change, the PEP would still\npermit this idiom to be updated to:\n\n- value = value ?else expensive_default()\n\nThe main argument against adding this form is that it's arguably\nambiguous and could mean either:\n\n- value = value ?else expensive_default(); or\n- value = value ?then value.subfield.of.interest\n\nThe second form isn't at all useful, but if this concern was deemed\nsignificant enough to address while still keeping the augmented\nassignment feature, the full keyword could be included in the syntax:\n\n- value ?else= expensive_default()\n\nAlternatively, augmented assignment could just be dropped from the\ncurrent proposal entirely and potentially reconsidered at a later date.\n\nExistence checking protocol\n\nThe existence checking protocol is including in this proposal primarily\nto allow for proxy objects (e.g. local representations of remote\nresources) and mock objects used in testing to correctly indicate\nnon-existence of target resources, even though the proxy or mock object\nitself is not None.\n\nHowever, with that protocol defined, it then seems natural to expand it\nto provide a type independent way of checking for NaN values in numeric\ntypes - at the moment you need to be aware of the exact data type you're\nworking with (e.g. builtin floats, builtin complex numbers, the decimal\nmodule) and use the appropriate operation (e.g. 
math.isnan, cmath.isnan,\ndecimal.getcontext().is_nan(), respectively)\n\nSimilarly, it seems reasonable to declare that the other placeholder\nbuiltin singletons, Ellipsis and NotImplemented, also qualify as objects\nthat represent the absence of data more so than they represent data.\n\nProposed symbolic notation\n\nPython has historically only had one kind of implied boolean context:\ntruth checking, which can be invoked directly via the bool() builtin. As\nthis PEP proposes a new kind of control flow operation based on\nexistence checking rather than truth checking, it is considered valuable\nto have a reminder directly in the code when existence checking is being\nused rather than truth checking.\n\nThe mathematical symbol for existence assertions is U+2203 'THERE\nEXISTS': ∃\n\nAccordingly, one possible approach to the syntactic additions proposed\nin this PEP would be to use that already defined mathematical notation:\n\n- expr1 ∃then expr2\n- expr1 ∃else expr2\n- obj∃.attr\n- obj∃[expr]\n- target ∃= expr\n\nHowever, there are two major problems with that approach, one practical,\nand one pedagogical.\n\nThe practical problem is the usual one that most keyboards don't offer\nany easy way of entering mathematical symbols other than those used in\nbasic arithmetic (even the symbols appearing in this PEP were ultimately\ncopied & pasted from[4] rather than being entered directly).\n\nThe pedagogical problem is that the symbols for existence assertions (∃)\nand universal assertions (∀) aren't going to be familiar to most people\nthe way basic arithmetic operators are, so we wouldn't actually be\nmaking the proposed syntax easier to understand by adopting ∃.\n\nBy contrast, ? is one of the few remaining unused ASCII punctuation\ncharacters in Python's syntax, making it available as a candidate\nsyntactic marker for \"this control flow operation is based on an\nexistence check, not a truth check\".\n\nTaking that path would also have the advantage of aligning Python's\nsyntax with corresponding syntax in other languages that offer similar\nfeatures.\n\nDrawing from the existing summary in PEP 505 and the Wikipedia articles\non the \"safe navigation operator[5] and the \"null coalescing\noperator\"[6], we see:\n\n- The ?. existence checking attribute access syntax precisely aligns\n with:\n - the \"safe navigation\" attribute access operator in C# (?.)\n - the \"optional chaining\" operator in Swift (?.)\n - the \"safe navigation\" attribute access operator in Groovy (?.)\n - the \"conditional member access\" operator in Dart (?.)\n- The ?[] existence checking attribute access syntax precisely aligns\n with:\n - the \"safe navigation\" subscript operator in C# (?[])\n - the \"optional subscript\" operator in Swift (?[].)\n- The ?else existence checking fallback syntax semantically aligns\n with:\n - the \"null-coalescing\" operator in C# (??)\n - the \"null-coalescing\" operator in PHP (??)\n - the \"nil-coalescing\" operator in Swift (??)\n\nTo be clear, these aren't the only spelling of these operators used in\nother languages, but they're the most common ones, and the ? symbol is\nthe most common syntactic marker by far (presumably prompted by the use\nof ? 
to introduce the \"then\" clause in C-style conditional expressions,\nwhich many of these languages also offer).\n\nProposed keywords\n\nGiven the symbolic marker ?, it would be syntactically unambiguous to\nspell the existence checking precondition and fallback operations using\nthe same keywords as their truth checking counterparts:\n\n- expr1 ?and expr2 (instead of expr1 ?then expr2)\n- expr1 ?or expr2 (instead of expr1 ?else expr2)\n\nHowever, while syntactically unambiguous when written, this approach\nmakes the code incredibly hard to pronounce (What's the pronunciation of\n\"?\"?) and also hard to describe (given reused keywords, there's no\nobvious shorthand terms for \"existence checking precondition (?and)\" and\n\"existence checking fallback (?or)\" that would distinguish them from\n\"logical conjunction (and)\" and \"logical disjunction (or)\").\n\nWe could try to encourage folks to pronounce the ? symbol as \"exists\",\nmaking the shorthand names the \"exists-and expression\" and the\n\"exists-or expression\", but there'd be no way of guessing those names\npurely from seeing them written in a piece of code.\n\nInstead, this PEP takes advantage of the proposed symbolic syntax to\nintroduce a new keyword (?then) and borrow an existing one (?else) in a\nway that allows people to refer to \"then expressions\" and \"else\nexpressions\" without ambiguity.\n\nThese keywords also align well with the conditional expressions that are\nsemantically equivalent to the proposed expressions.\n\nFor ?else expressions, expr1 ?else expr2 is equivalent to:\n\n _lhs_result = expr1\n _lhs_result if operator.exists(_lhs_result) else expr2\n\nHere the parallel is clear, since the else expr2 appears at the end of\nboth the abbreviated and expanded forms.\n\nFor ?then expressions, expr1 ?then expr2 is equivalent to:\n\n _lhs_result = expr1\n expr2 if operator.exists(_lhs_result) else _lhs_result\n\nHere the parallel isn't as immediately obvious due to Python's\ntraditionally anonymous \"then\" clauses (introduced by : in if statements\nand suffixed by if in conditional expressions), but it's still\nreasonably clear as long as you're already familiar with the\n\"if-then-else\" explanation of conditional control flow.\n\nRisks and concerns\n\nReadability\n\nLearning to read and write the new syntax effectively mainly requires\ninternalising two concepts:\n\n- expressions containing ? include an existence check and may short\n circuit\n- if None or another \"non-existent\" value is an expected input, and\n the correct handling is to propagate that to the result, then the\n existence checking operators are likely what you want\n\nCurrently, these concepts aren't explicitly represented at the language\nlevel, so it's a matter of learning to recognise and use the various\nidiomatic patterns based on conditional expressions and statements.\n\nMagic syntax\n\nThere's nothing about ? as a syntactic element that inherently suggests\nis not None or operator.exists. The main current use of ? 
as a symbol in\nPython code is as a trailing suffix in IPython environments to request\nhelp information for the result of the preceding expression.\n\nHowever, the notion of existence checking really does benefit from a\npervasive visual marker that distinguishes it from truth checking, and\nthat calls for a single-character symbolic syntax if we're going to do\nit at all.\n\nConceptual complexity\n\nThis proposal takes the currently ad hoc and informal concept of\n\"existence checking\" and elevates it to the status of being a syntactic\nlanguage feature with a clearly defined operator protocol.\n\nIn many ways, this should actually reduce the overall conceptual\ncomplexity of the language, as many more expectations will map correctly\nbetween truth checking with bool(expr) and existence checking with\noperator.exists(expr) than currently map between truth checking and\nexistence checking with expr is not None (or expr is not NotImplemented\nin the context of operand coercion, or the various NaN-checking\noperations in mathematical libraries).\n\nAs a simple example of the new parallels introduced by this PEP,\ncompare:\n\n all_are_true = all(map(bool, iterable))\n at_least_one_is_true = any(map(bool, iterable))\n all_exist = all(map(operator.exists, iterable))\n at_least_one_exists = any(map(operator.exists, iterable))\n\nDesign Discussion\n\nSubtleties in chaining existence checking expressions\n\nSimilar subtleties arise in chaining existence checking expressions as\nalready exist in chaining logical operators: the behaviour can be\nsurprising if the right hand side of one of the expressions in the chain\nitself returns a value that doesn't exist.\n\nAs a result, value = arg1 ?then f(arg1) ?else default() would be dubious\nfor essentially the same reason that value = cond and expr1 or expr2 is\ndubious: the former will evaluate default() if f(arg1) returns None,\njust as the latter will evaluate expr2 if expr1 evaluates to False in a\nboolean context.\n\nAmbiguous interaction with conditional expressions\n\nIn the proposal as currently written, the following is a syntax error:\n\n- value = f(arg) if arg ?else default\n\nWhile the following is a valid operation that checks a second condition\nif the first doesn't exist rather than merely being false:\n\n- value = expr1 if cond1 ?else cond2 else expr2\n\nThe expression chaining problem described above means that the argument\ncan be made that the first operation should instead be equivalent to:\n\n- value = f(arg) if operator.exists(arg) else default\n\nrequiring the second to be written in the arguably clearer form:\n\n- value = expr1 if (cond1 ?else cond2) else expr2\n\nAlternatively, the first form could remain a syntax error, and the\nexistence checking symbol could instead be attached to the if keyword:\n\n- value = expr1 if? 
cond else expr2\n\nExistence checking in other truth-checking contexts\n\nThe truth-checking protocol is currently used in the following syntactic\nconstructs:\n\n- logical conjunction (and-expressions)\n- logical disjunction (or-expressions)\n- conditional expressions (if-else expressions)\n- if statements\n- while loops\n- filter clauses in comprehensions and generator expressions\n\nIn the current PEP, switching from truth-checking with and and or to\nexistence-checking is a matter of substituting in the new keywords,\n?then and ?else in the appropriate places.\n\nFor other truth-checking contexts, it proposes either importing and\nusing the operator.exists API, or else continuing with the current idiom\nof checking specifically for expr is not None (or the context\nappropriate equivalent).\n\nThe simplest possible enhancement in that regard would be to elevate the\nproposed exists() API from an operator module function to a new builtin\nfunction.\n\nAlternatively, the ? existence checking symbol could be supported as a\nmodifier on the if and while keywords to indicate the use of an\nexistence check rather than a truth check.\n\nHowever, it isn't at all clear that the potential consistency benefits\ngained for either suggestion would justify the additional disruption, so\nthey've currently been omitted from the proposal.\n\nDefining expected invariant relations between __bool__ and __exists__\n\nThe PEP currently leaves the definition of __bool__ on all existing\ntypes unmodified, which ensures the entire proposal remains backwards\ncompatible, but results in the following cases where bool(obj) returns\nTrue, but the proposed operator.exists(obj) would return False:\n\n- NaN values for float, complex, and decimal.Decimal\n- Ellipsis\n- NotImplemented\n\nThe main argument for potentially changing these is that it becomes\neasier to reason about potential code behaviour if we have a recommended\ninvariant in place saying that values which indicate they don't exist in\nan existence checking context should also report themselves as being\nFalse in a truth checking context.\n\nFailing to define such an invariant would lead to arguably odd outcomes\nlike float(\"NaN\") ?else 0.0 returning 0.0 while float(\"NaN\") or 0.0\nreturns NaN.\n\nLimitations\n\nArbitrary sentinel objects\n\nThis proposal doesn't attempt to provide syntactic support for the\n\"sentinel object\" idiom, where None is a permitted explicit value, so a\nseparate sentinel object is defined to indicate missing values:\n\n _SENTINEL = object()\n def f(obj=_SENTINEL):\n return obj if obj is not _SENTINEL else default_value()\n\nThis could potentially be supported at the expense of making the\nexistence protocol definition significantly more complex, both to define\nand to use:\n\n- at the Python layer, operator.exists and __exists__ implementations\n would return the empty tuple to indicate non-existence, and\n otherwise return a singleton tuple containing a reference to the\n object to be used as the result of the existence check\n- at the C layer, tp_exists implementations would return NULL to\n indicate non-existence, and otherwise return a PyObject * pointer as\n the result of the existence check\n\nGiven that change, the sentinel object idiom could be rewritten as:\n\n class Maybe:\n SENTINEL = object()\n def __init__(self, value):\n self._result = (value,) is value is not self.SENTINEL else ()\n def __exists__(self):\n return self._result\n\n def f(obj=Maybe.SENTINEL):\n return Maybe(obj) ?else default_value()\n\nHowever, 
I don't think cases where the 3 proposed standard sentinel values (i.e.
None, Ellipsis and NotImplemented) can't be used are going to be
anywhere near common enough for the additional protocol complexity and
the loss of symmetry between __bool__ and __exists__ to be worth it.

Specification

The Abstract already gives the gist of the proposal and the Rationale
gives some specific examples. If there's enough interest in the basic
idea, then a full specification will need to provide a precise
correspondence between the proposed syntactic sugar and the underlying
conditional expressions that is sufficient to guide the creation of a
reference implementation.

...TBD...

Implementation

As with PEP 505, actual implementation has been deferred pending
in-principle interest in the idea of adding these operators - the
implementation isn't the hard part of these proposals, the hard part is
deciding whether or not this is a change where the long term benefits
for new and existing Python users outweigh the short term costs involved
in the wider ecosystem (including developers of other implementations,
language curriculum developers, and authors of other Python related
educational material) adjusting to the change.

...TBD...

References

[1] python-ideas discussion thread
(https://mail.python.org/pipermail/python-ideas/2016-October/043415.html)

[2] Steven D'Aprano's critique of the proposal
(https://mail.python.org/pipermail/python-ideas/2016-October/043453.html)

[3] Considering a link to the idea of overloadable Boolean operators
(https://mail.python.org/pipermail/python-ideas/2016-October/043447.html)

[4] FileFormat.info: Unicode Character 'THERE EXISTS' (U+2203)
(http://www.fileformat.info/info/unicode/char/2203/index.htm)

[5] Wikipedia: Safe navigation operator
(https://en.wikipedia.org/wiki/Safe_navigation_operator)

[6] Wikipedia: Null coalescing operator
(https://en.wikipedia.org/wiki/Null_coalescing_operator)

Copyright

This document has been placed in the public domain under the terms of
the CC0 1.0 license: https://creativecommons.org/publicdomain/zero/1.0/

PEP: 645 Title: Allow writing optional types as x? Author: Maggie Moss
Sponsor: Guido van Rossum Status: Withdrawn Type: Standards Track
Content-Type: text/x-rst Created: 25-Aug-2020 Resolution:
https://mail.python.org/archives/list/typing-sig@python.org/message/E75SPV6DDHLEEFSA5MBN5HUOQWDMUQJ2/

Abstract

This PEP proposes adding a ? operator for types to allow writing int? in
place of Optional[int].

PEP Withdrawal

The notation T|None introduced by PEP 604 to write Optional[T] is a fine
alternative to T? and does not require new syntax.

Using T? to mean T|None is also inconsistent with TypeScript, where it
roughly means NotRequired[T]. Such inconsistency would likely confuse
folks coming from TypeScript to Python.
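The distinction is visible with typing.TypedDict: NotRequired marks a
key that may be absent entirely, while Optional marks a value that may
be None. (NotRequired was added to typing in Python 3.11; the Movie
class below is a hypothetical illustration.)

    from typing import TypedDict, NotRequired, Optional

    class Movie(TypedDict):
        title: str
        year: NotRequired[int]   # key may be omitted entirely
        director: Optional[str]  # key required, value may be None

    m: Movie = {"title": "Metropolis", "director": None}  # ok: no year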
The above represents the consensus of typing-sig and the sponsor of this
PEP.

Motivation

Types have become a valuable and powerful part of the Python language.
However, many type annotations are verbose and add considerable friction
to using type annotations. By improving the typing syntax, adding types
to Python code becomes simpler and improves the development experience
for Python users.

In a similar vein, a PEP to introduce short hand syntax for
Union types <604> has been approved and implemented.

Rationale

Types in Python can be quite verbose; this can be a hindrance when
working towards type adoption. Making types more ergonomic, as was done
with the Union type in PEP 604 (e.g., int | str), would reduce the
effort needed to add types to new and existing Python code. The Optional
annotation is used frequently in both partially and fully typed Python
code bases. In a small sampling of 5 well-typed open source projects, on
average 7% of annotations included at least one optional type. This
indicates that updating the syntax has the potential to make types more
concise, reduce code length and improve readability.

Simplifying the syntax for optionals has been discussed previously
within the typing community. The consensus during these conversations
has been that ? is the preferred operator. There is no native support
for unary ? in Python and this will need to be added to the runtime.

Adding the ? sigil to the Python grammar has been proposed previously in
PEP 505, which is currently in a deferred state. PEP 505 proposes:

- a "None coalescing" binary operator ??
- a "None-aware attribute access" operator ?. ("maybe dot")
- a "None-aware indexing" operator ?[] ("maybe subscript")

Should PEP 505 be approved in the future, it would not interfere with
the typing specific ? proposed in this PEP. As well, since all uses of
the ? would be conceptually related, it would not be confusing in terms
of learning Python or a hindrance to quick visual comprehension.

The proposed syntax, with the postfix operator, mimics the optional
syntax found in other typed languages, like C#, TypeScript and Swift.
The widespread adoption and popularity of these languages means that
Python developers are likely already familiar with this syntax.

    // Optional in Swift
    var example: String?

    // Optional in C#
    string? example;

Adding this syntax would also follow the often used pattern of using
builtin types as annotations. For example, list, dict and None. This
would allow more annotations to be added to Python code without
importing from typing.

Specification

The new optional syntax should be accepted for function, variable,
attribute and parameter annotations.

    # instead of
    # def foo(x: Optional[int], y: Optional[str], z: Optional[list[int]]): ...
    def foo(x: int?, y: str?, z: list[int]?): ...

    # def bar(x: list[typing.Optional[int]]): ...
    def bar(x: list[int?]): ...

The new optional syntax should be equivalent to the existing
typing.Optional syntax:

    typing.Optional[int] == int?

The new optional syntax should have the same identity as the existing
typing.Optional syntax:

    typing.Optional[int] is int?

It should also be equivalent to a Union with None:

    # old syntax
    int? == typing.Union[int, None]

    # new syntax
    int? == int | None
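For comparison, the already-accepted PEP 604 spelling provides the same
equivalence at runtime today (an illustrative session on Python 3.10 or
later):

    >>> import typing
    >>> typing.Optional[int] == int | None
    True
    >>> int | None
    int | None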
Since the new Union syntax specified in PEP 604 is supported in
isinstance and issubclass, the new optional syntax should be supported
in both isinstance and issubclass:

    isinstance(1, int?)  # true
    issubclass(Child, Super?)  # true

A new dunder method will need to be implemented to allow the ? operator
to be overloaded for other functionality.

Backwards Compatibility

? is currently unused in Python syntax; this PEP is therefore fully
backwards compatible.

Reference Implementation

A reference implementation can be found here.

Rejected Ideas

Discussed alternatives were:

- The ~ operator was considered in place of ?.
- A prefix operator (?int).

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.

PEP: 476 Title: Enabling certificate verification by default for stdlib
http clients Version: $Revision$ Last-Modified: $Date$ Author: Alex
Gaynor Status: Final Type: Standards Track Content-Type: text/x-rst
Created: 28-Aug-2014 Python-Version: 2.7.9, 3.4.3, 3.5 Resolution:
https://mail.python.org/pipermail/python-dev/2014-October/136676.html

Abstract

Currently when a standard library http client (the urllib, urllib2,
http, and httplib modules) encounters an https:// URL it will wrap the
network HTTP traffic in a TLS stream, as is necessary to communicate
with such a server. However, during the TLS handshake it will not
actually check that the server's X509 certificate is signed by a CA in
any trust root, nor will it verify that the Common Name (or Subject
Alternate Name) on the presented certificate matches the requested host.

The failure to do these checks means that anyone with a privileged
network position is able to trivially execute a man in the middle attack
against a Python application using either of these HTTP clients, and
change traffic at will.

This PEP proposes to enable verification of X509 certificate signatures,
as well as hostname verification for Python's HTTP clients by default,
subject to opt-out on a per-call basis. This change would be applied to
Python 2.7, Python 3.4, and Python 3.5.

Rationale

The "S" in "HTTPS" stands for secure. When Python's users type "HTTPS"
they are expecting a secure connection, and Python should adhere to a
reasonable standard of care in delivering this.
Currently we are failing\nat this, and in doing so, APIs which appear simple are misleading users.\n\nWhen asked, many Python users state that they were not aware that Python\nfailed to perform these validations, and are shocked.\n\nThe popularity of requests (which enables these checks by default)\ndemonstrates that these checks are not overly burdensome in any way, and\nthe fact that it is widely recommended as a major security improvement\nover the standard library clients demonstrates that many expect a higher\nstandard for \"security by default\" from their tools.\n\nThe failure of various applications to note Python's negligence in this\nmatter is a source of regular CVE assignment[1][2][3][4][5][6][7][8]\n[9][10][11].\n\nTechnical Details\n\nPython would use the system provided certificate database on all\nplatforms. Failure to locate such a database would be an error, and\nusers would need to explicitly specify a location to fix it.\n\nThis will be achieved by adding a new ssl._create_default_https_context\nfunction, which is the same as ssl.create_default_context.\n\nhttp.client can then replace its usage of ssl._create_stdlib_context\nwith the ssl._create_default_https_context.\n\nAdditionally ssl._create_stdlib_context is renamed\nssl._create_unverified_context (an alias is kept around for backwards\ncompatibility reasons).\n\nTrust database\n\nThis PEP proposes using the system-provided certificate database.\nPrevious discussions have suggested bundling Mozilla's certificate\ndatabase and using that by default. This was decided against for several\nreasons:\n\n- Using the platform trust database imposes a lower maintenance burden\n on the Python developers -- shipping our own trust database would\n require doing a release every time a certificate was revoked.\n- Linux vendors, and other downstreams, would unbundle the Mozilla\n certificates, resulting in a more fragmented set of behaviors.\n- Using the platform stores makes it easier to handle situations such\n as corporate internal CAs.\n\nOpenSSL also has a pair of environment variables, SSL_CERT_DIR and\nSSL_CERT_FILE which can be used to point Python at a different\ncertificate database.\n\nBackwards compatibility\n\nThis change will have the appearance of causing some HTTPS connections\nto \"break\", because they will now raise an Exception during handshake.\n\nThis is misleading however, in fact these connections are presently\nfailing silently, an HTTPS URL indicates an expectation of\nconfidentiality and authentication. The fact that Python does not\nactually verify that the user's request has been made is a bug, further:\n\"Errors should never pass silently.\"\n\nNevertheless, users who have a need to access servers with self-signed\nor incorrect certificates would be able to do so by providing a context\nwith custom trust roots or which disables validation (documentation\nshould strongly recommend the former where possible). 
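For example, trusting a specific internal CA while keeping full
verification and hostname checking enabled might look like this (the
path and host are illustrative; the Python 3 spelling is shown):

    import ssl
    import urllib.request

    # Verification stays on; we simply verify against a specific CA.
    context = ssl.create_default_context(cafile="/etc/pki/internal-ca.pem")
    urllib.request.urlopen("https://internal.example.com/", context=context)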
Users will also be\nable to add necessary certificates to system trust stores in order to\ntrust them globally.\n\nTwisted's 14.0 release made this same change, and it has been met with\nalmost no opposition.\n\nOpting out\n\nFor users who wish to opt out of certificate verification on a single\nconnection, they can achieve this by providing the context argument to\nurllib.urlopen:\n\n import ssl\n\n # This restores the same behavior as before.\n context = ssl._create_unverified_context()\n urllib.urlopen(\"https://no-valid-cert\", context=context)\n\nIt is also possible, though highly discouraged, to globally disable\nverification by monkeypatching the ssl module in versions of Python that\nimplement this PEP:\n\n import ssl\n\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\n else:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nThis guidance is aimed primarily at system administrators that wish to\nadopt newer versions of Python that implement this PEP in legacy\nenvironments that do not yet support certificate verification on HTTPS\nconnections. For example, an administrator may opt out by adding the\nmonkeypatch above to sitecustomize.py in their Standard Operating\nEnvironment for Python. Applications and libraries SHOULD NOT be making\nthis change process wide (except perhaps in response to a system\nadministrator controlled configuration setting).\n\nParticularly security sensitive applications should always provide an\nexplicit application defined SSL context rather than relying on the\ndefault behaviour of the underlying Python implementation.\n\nOther protocols\n\nThis PEP only proposes requiring this level of validation for HTTP\nclients, not for other protocols such as SMTP.\n\nThis is because while a high percentage of HTTPS servers have correct\ncertificates, as a result of the validation performed by browsers, for\nother protocols self-signed or otherwise incorrect certificates are far\nmore common. Note that for SMTP at least, this appears to be changing\nand should be reviewed for a potential similar PEP in the future:\n\n- https://www.facebook.com/notes/protect-the-graph/the-current-state-of-smtp-starttls-deployment/1453015901605223\n- https://www.facebook.com/notes/protect-the-graph/massive-growth-in-smtp-starttls-deployment/1491049534468526\n\nPython Versions\n\nThis PEP describes changes that will occur on both the 3.4.x, 3.5 and\n2.7.X branches. 
For 2.7.x this will require backporting the context
(SSLContext) argument to httplib, in addition to the features already
backported in PEP 466.

Implementation

- LANDED: Issue 22366 adds the context argument to
  urllib.request.urlopen.
- Issue 22417 implements the substance of this PEP.

Copyright

This document has been placed into the public domain.

[1] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-4340
[2] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-3533
[3] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-5822
[4] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-5825
[5] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-1909
[6] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2037
[7] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2073
[8] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2191
[9] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-4111
[10] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-6396
[11] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-6444


PEP: 339
Title: Design of the CPython Compiler
Version: $Revision$
Last-Modified: $Date$
Author: Brett Cannon <brett@python.org>
Status: Withdrawn
Type: Informational
Content-Type: text/x-rst
Created: 02-Feb-2005
Post-History:

Note

This PEP has been withdrawn and moved to the Python developer's guide.

Abstract

Historically (through 2.4), compilation from source code to bytecode
involved two steps:

1. Parse the source code into a parse tree (Parser/pgen.c)
2. Emit bytecode based on the parse tree (Python/compile.c)

This, however, is not how a standard compiler works. The usual steps for
compilation are:

1. Parse source code into a parse tree (Parser/pgen.c)
2. Transform parse tree into an Abstract Syntax Tree (Python/ast.c)
3. Transform AST into a Control Flow Graph (Python/compile.c)
4. Emit bytecode based on the Control Flow Graph (Python/compile.c)

Starting with Python 2.5, the above steps are used. This change was done
to simplify compilation by breaking it into three steps. The purpose of
this document is to outline how the latter three steps of the process
work.

This document does not touch on how parsing works beyond what is needed
to explain what is needed for compilation. It is also not exhaustive in
terms of how the entire system works. You will most likely need to read
some source to have an exact understanding of all details.

Parse Trees

Python's parser is an LL(1) parser mostly based on the implementation
laid out in the Dragon Book [Aho86].

The grammar file for Python can be found in Grammar/Grammar, with the
numeric values of grammar rules stored in Include/graminit.h. The
numeric values for types of tokens (literal tokens, such as :, numbers,
etc.) are kept in Include/token.h.

The parse tree is made up of node * structs (as defined in
Include/node.h).

Querying data from the node structs can be done with the following
macros (which are all defined in Include/token.h):

- CHILD(node *, int): Returns the nth child of the node using
  zero-offset indexing.
- RCHILD(node *, int): Returns the nth child of the node from the
  right side; use negative numbers!
- NCH(node *): Number of children the node has.
- STR(node *): String representation of the node; e.g., will return :
  for a COLON token.
- TYPE(node *): The type of node as specified in Include/graminit.h.
- REQ(node *, TYPE): Assert that the node is the type that is expected.
- LINENO(node *): Retrieve the line number of the source code that led
  to the creation of the parse rule; defined in Python/ast.c.

To tie all of this together, consider the rule for 'while':

    while_stmt: 'while' test ':' suite ['else' ':' suite]

The node representing this will have TYPE(node) == while_stmt, and the
number of children can be 4 or 7 depending on whether there is an 'else'
statement. To access what should be the first ':' and require that it be
an actual ':' token, use REQ(CHILD(node, 2), COLON).

Abstract Syntax Trees (AST)

The abstract syntax tree (AST) is a high-level representation of the
program structure without the necessity of containing the source code;
it can be thought of as an abstract representation of the source code.
The AST nodes are specified using the Zephyr Abstract Syntax Definition
Language (ASDL) [Wang97].

The definition of the AST nodes for Python is found in the file
Parser/Python.asdl.

Each AST node (representing statements, expressions, and several
specialized types, like list comprehensions and exception handlers) is
defined by the ASDL. Most definitions in the AST correspond to a
particular source construct, such as an 'if' statement or an attribute
lookup. The definition is independent of its realization in any
particular programming language.

The following fragment of the Python ASDL construct demonstrates the
approach and syntax:

    module Python
    {
        stmt = FunctionDef(identifier name, arguments args, stmt* body,
                           expr* decorators)
             | Return(expr? value) | Yield(expr value)
             attributes (int lineno)
    }

The preceding example describes three different kinds of statements:
function definitions, return statements, and yield statements. All three
kinds are considered of type stmt, as shown by the '|' separating the
various kinds. They all take arguments of various kinds and amounts.

Modifiers on the argument type specify the number of values needed: '?'
means it is optional, '*' means 0 or more, and no modifier means only
one value for the argument, which is required. FunctionDef, for
instance, takes an identifier for the name, 'arguments' for args, zero
or more stmt arguments for 'body', and zero or more expr arguments for
'decorators'.
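In modern CPython these definitions are mirrored by the nodes that the
standard ast module exposes (the module itself postdates this PEP), so
they are easy to see in action; a brief illustrative session:

    import ast

    tree = ast.parse("def f(x):\n    return x")
    func = tree.body[0]              # an ast.FunctionDef node
    print(type(func).__name__)       # FunctionDef
    print(type(func.args).__name__)  # arguments -- a single node
    print(ast.dump(func))            # textual dump of the subtree
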
Do notice that something like 'arguments', which is a node type, is
represented as a single AST node and not as a sequence of nodes as with
stmt, as one might expect.

All three kinds also have an 'attributes' argument; this is shown by the
fact that 'attributes' lacks a '|' before it.

The statement definitions above generate the following C structure type:

    typedef struct _stmt *stmt_ty;

    struct _stmt {
        enum { FunctionDef_kind=1, Return_kind=2, Yield_kind=3 } kind;
        union {
            struct {
                identifier name;
                arguments_ty args;
                asdl_seq *body;
            } FunctionDef;

            struct {
                expr_ty value;
            } Return;

            struct {
                expr_ty value;
            } Yield;
        } v;
        int lineno;
    };

Also generated are a series of constructor functions that allocate (in
this case) a stmt_ty struct with the appropriate initialization. The
'kind' field specifies which component of the union is initialized. The
FunctionDef() constructor function sets 'kind' to FunctionDef_kind and
initializes the 'name', 'args', 'body', and 'attributes' fields.

Memory Management

Before discussing the actual implementation of the compiler, a
discussion of how memory is handled is in order. To make memory
management simple, an arena is used. This means that memory is pooled in
a single location for easy allocation and removal, which gives us the
removal of explicit memory deallocation. Because memory allocation for
all needed memory in the compiler registers that memory with the arena,
a single call to free the arena is all that is needed to completely free
all memory used by the compiler.

In general, unless you are working on the critical core of the compiler,
memory management can be completely ignored. But if you are working at
either the very beginning of the compiler or the end, you need to care
about how the arena works. All code relating to the arena is in either
Include/pyarena.h or Python/pyarena.c.

PyArena_New() will create a new arena. The returned PyArena structure
will store pointers to all memory given to it. This does the bookkeeping
of what memory needs to be freed when the compiler is finished with the
memory it used. That freeing is done with PyArena_Free(), which only
needs to be called in strategic areas where the compiler exits.

As stated above, in general you should not have to worry about memory
management when working on the compiler. The technical details have been
designed to be hidden from you for most cases.

The only exception comes about when managing a PyObject. Since the rest
of Python uses reference counting, there is extra support added to the
arena to clean up each PyObject that was allocated. These cases are very
rare. However, if you've allocated a PyObject, you must tell the arena
about it by calling PyArena_AddPyObject().

Parse Tree to AST

The AST is generated from the parse tree (see Python/ast.c) using the
function PyAST_FromNode().

The function begins a tree walk of the parse tree, creating various AST
nodes as it goes along. It does this by allocating all new nodes it
needs, calling the proper AST node creation functions for any required
supporting functions, and connecting them as needed.

Do realize that there is no automated or symbolic connection between
the grammar specification and the nodes in the parse tree.
No help is
directly provided by the parse tree as in yacc.

For instance, one must keep track of which node in the parse tree one is
working with (e.g., if you are working with an 'if' statement you need
to watch out for the ':' token to find the end of the conditional).

The functions called to generate AST nodes from the parse tree all have
the name ast_for_xx, where xx is the grammar rule that the function
handles (alias_for_import_name is the exception to this). These in turn
call the constructor functions as defined by the ASDL grammar and
contained in Python/Python-ast.c (which was generated by
Parser/asdl_c.py) to create the nodes of the AST. This all leads to a
sequence of AST nodes stored in asdl_seq structs.

Functions and macros for creating and using asdl_seq * types are found
in Python/asdl.c and Include/asdl.h:

- asdl_seq_new(): Allocate memory for an asdl_seq of the specified
  length.
- asdl_seq_GET(): Get the item held at a specific position in an
  asdl_seq.
- asdl_seq_SET(): Set a specific index in an asdl_seq to the specified
  value.
- asdl_seq_LEN(asdl_seq *): Return the length of an asdl_seq.

If you are working with statements, you must also worry about keeping
track of what line number generated the statement. Currently the line
number is passed as the last parameter to each stmt_ty function.

Control Flow Graphs

A control flow graph (often referenced by its acronym, CFG) is a
directed graph that models the flow of a program using basic blocks that
contain the intermediate representation (abbreviated "IR", in this case
Python bytecode) within the blocks. Basic blocks themselves are a block
of IR that has a single entry point but possibly multiple exit points.
The single entry point is the key to basic blocks; it all has to do with
jumps. An entry point is the target of something that changes control
flow (such as a function call or a jump), while exit points are
instructions that would change the flow of the program (such as jumps
and 'return' statements). What this means is that a basic block is a
chunk of code that starts at the entry point and runs to an exit point
or the end of the block.

As an example, consider an 'if' statement with an 'else' block. The
guard on the 'if' is a basic block which is pointed to by the basic
block containing the code leading to the 'if' statement. The 'if'
statement block contains jumps (which are exit points) to the true body
of the 'if' and the 'else' body (which may be NULL), each of which are
their own basic blocks. Both of those blocks in turn point to the basic
block representing the code following the entire 'if' statement.

CFGs are usually one step away from final code output. Code is directly
generated from the basic blocks (with jump targets adjusted based on the
output order) by doing a post-order depth-first search on the CFG
following the edges.

AST to CFG to Bytecode

With the AST created, the next step is to create the CFG. The first step
is to convert the AST to Python bytecode without having jump targets
resolved to specific offsets (this is calculated when the CFG goes to
final bytecode). Essentially, this transforms the AST into Python
bytecode with control flow represented by the edges of the CFG.

Conversion is done in two passes. The first creates the namespace
(variables can be classified as local, free/cell for closures, or
global). With that done, the second pass essentially flattens the CFG
into a list and calculates jump offsets for final output of bytecode.
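The namespace classification computed by that first pass is visible from
Python code through the standard symtable module in modern CPython; a
small illustrative sketch:

    import symtable

    code = "def f(x):\n    y = x + 1\n    return y"
    table = symtable.symtable(code, "<string>", "exec")
    f = table.get_children()[0]           # the symbol table for f
    print(f.lookup("x").is_parameter())   # True
    print(f.lookup("y").is_local())       # True
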
The conversion process is initiated by a call to the function
PyAST_Compile() in Python/compile.c. This function handles both the
conversion of the AST to a CFG and the output of final bytecode from the
CFG. The AST-to-CFG step is handled mostly by two functions called by
PyAST_Compile(): PySymtable_Build() and compiler_mod(). The former is in
Python/symtable.c while the latter is in Python/compile.c.

PySymtable_Build() begins by entering the starting code block for the
AST (passed in) and then calling the proper symtable_visit_xx function
(with xx being the AST node type). Next, the AST tree is walked, with
the various code blocks that delineate the reach of a local variable
entered and exited using symtable_enter_block() and
symtable_exit_block(), respectively.

Once the symbol table is created, it is time for CFG creation, whose
code is in Python/compile.c. This is handled by several functions that
break the task down by various AST node types. The functions are all
named compiler_visit_xx, where xx is the name of the node type (such as
stmt, expr, etc.). Each function receives a struct compiler * and xx_ty,
where xx is the AST node type. Typically these functions consist of a
large 'switch' statement, branching based on the kind of node type
passed to it. Simple things are handled inline in the 'switch'
statement, with more complex transformations farmed out to other
functions named compiler_xx, with xx being a descriptive name of what is
being handled.

When transforming an arbitrary AST node, use the VISIT() macro. The
appropriate compiler_visit_xx function is called, based on the node type
passed in (so VISIT(c, expr, node) calls compiler_visit_expr(c, node)).
The VISIT_SEQ macro is very similar, but is called on AST node sequences
(those values that were created as arguments to a node that used the '*'
modifier). There is also VISIT_SLICE() just for handling slices.

Emission of bytecode is handled by the following macros:

- ADDOP(): Add a specified opcode.
- ADDOP_I(): Add an opcode that takes an argument.
- ADDOP_O(struct compiler *c, int op, PyObject *type, PyObject *obj):
  Add an opcode with the proper argument based on the position of the
  specified PyObject in a PyObject sequence object, but with no handling
  of mangled names; used when you need to do named lookups of objects
  such as globals, consts, or parameters where name mangling is not
  possible and the scope of the name is known.
- ADDOP_NAME(): Just like ADDOP_O, but name mangling is also handled;
  used for attribute loading or importing based on name.
- ADDOP_JABS(): Create an absolute jump to a basic block.
- ADDOP_JREL(): Create a relative jump to a basic block.

There are also several helper functions that emit bytecode, named
compiler_xx(), where xx is what the function helps with (list, boolop,
etc.). A rather useful one is compiler_nameop(). This function looks up
the scope of a variable and, based on the expression context, emits the
proper opcode to load, store, or delete the variable.

The line number on which a statement is defined is handled by
compiler_visit_stmt() and thus is not a worry.
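The effect of these helpers is easy to observe with the standard dis
module, which disassembles the bytecode the compiler ultimately emits; a
quick illustration in modern CPython:

    import dis

    def demo(x):
        y = x + 1    # loads/stores emitted via compiler_nameop()
        if y > 0:    # the guard ends a basic block with a conditional jump
            return y
        return 0

    dis.dis(demo)    # prints opcodes, line numbers, and jump targets
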
In addition to emitting bytecode based on the AST node, the creation of
basic blocks must also be handled. Below are the macros and functions
used for managing basic blocks:

- NEW_BLOCK(): Create a block and set it as current.
- NEXT_BLOCK(): Basically NEW_BLOCK() plus a jump from the current
  block.
- compiler_new_block(): Create a block but don't use it (used for
  generating jumps).

Once the CFG is created, it must be flattened and then final emission of
bytecode occurs. Flattening is handled using a post-order depth-first
search. Once flattened, jump offsets are backpatched based on the
flattening and then a PyCodeObject is created. All of this is handled by
calling assemble().

Introducing New Bytecode

Sometimes a new feature requires a new opcode. But adding new bytecode
is not as simple as just suddenly introducing new bytecode in the AST ->
bytecode step of the compiler. Several pieces of code throughout Python
depend on having correct information about what bytecode exists.

First, you must choose a name and a unique identifier number. The
official list of bytecode can be found in Include/opcode.h. If the
opcode is to take an argument, it must be given a unique number greater
than that assigned to HAVE_ARGUMENT (as found in Include/opcode.h).

Once the name/number pair has been chosen and entered in
Include/opcode.h, you must also enter it into Lib/opcode.py and
Doc/library/dis.rst.

With a new bytecode you must also change what is called the magic number
for .pyc files. The variable MAGIC in Python/import.c contains the
number. Changing this number will lead to all .pyc files with the old
MAGIC being recompiled by the interpreter on import.

Finally, you need to introduce the use of the new bytecode. Altering
Python/compile.c and Python/ceval.c will be the primary places to
change. But you will also need to change the 'compiler' package. The key
files to do that are Lib/compiler/pyassem.py and
Lib/compiler/pycodegen.py.

If you make a change here that can affect the output of bytecode that is
already in existence, and you do not change the magic number every time,
make sure to delete your old .py(c|o) files! Even though you will end up
changing the magic number if you change the bytecode, while you are
debugging your work you will be changing the bytecode output without
constantly bumping up the magic number. This means you end up with stale
.pyc files that will not be recreated. Running
find . -name '*.py[co]' -exec rm -f {} ';' should delete all .pyc files
you have, forcing new ones to be created and thus allowing you to test
out your new bytecode properly.

Code Objects

The result of PyAST_Compile() is a PyCodeObject, which is defined in
Include/code.h. And with that you now have executable Python bytecode!

Code objects (bytecode) are executed in Python/ceval.c. This file will
also need a new case statement for the new opcode in the big switch
statement in PyEval_EvalFrameEx().

Important Files

- Parser/
  - Python.asdl: ASDL syntax file.
  - asdl.py: "An implementation of the Zephyr Abstract Syntax
    Definition Language." Uses SPARK to parse the ASDL files.
  - asdl_c.py: "Generate C code from an ASDL description." Generates
    Python/Python-ast.c and Include/Python-ast.h.
  - spark.py: SPARK parser generator.
- Python/
  - Python-ast.c: Creates C structs corresponding to the ASDL types.
    Also contains code for marshaling AST nodes (core ASDL types have
    marshaling code in asdl.c). "File automatically generated by
    Parser/asdl_c.py".
    This file must be committed separately after every grammar change
    is committed, since the __version__ value is set to the latest
    grammar change revision number.
  - asdl.c: Contains code to handle the ASDL sequence type. Also has
    code to handle marshaling the core ASDL types, such as number and
    identifier. Used by Python-ast.c for marshaling AST nodes.
  - ast.c: Converts Python's parse tree into the abstract syntax tree.
  - ceval.c: Executes bytecode (a.k.a. the eval loop).
  - compile.c: Emits bytecode based on the AST.
  - symtable.c: Generates a symbol table from the AST.
  - pyarena.c: Implementation of the arena memory manager.
  - import.c: Home of the magic number (named MAGIC) for bytecode
    versioning.
- Include/
  - Python-ast.h: Contains the actual definitions of the C structs as
    generated by Python/Python-ast.c. "Automatically generated by
    Parser/asdl_c.py".
  - asdl.h: Header for the corresponding Python/asdl.c.
  - ast.h: Declares PyAST_FromNode() external (from Python/ast.c).
  - code.h: Header file for Objects/codeobject.c; contains the
    definition of PyCodeObject.
  - symtable.h: Header for Python/symtable.c. struct symtable and
    PySTEntryObject are defined here.
  - pyarena.h: Header file for the corresponding Python/pyarena.c.
  - opcode.h: Master list of bytecode; if this file is modified you
    must modify several other files accordingly (see "Introducing New
    Bytecode").
- Objects/
  - codeobject.c: Contains PyCodeObject-related code (originally in
    Python/compile.c).
- Lib/
  - opcode.py: One of the files that must be modified if
    Include/opcode.h is.
  - compiler/
    - pyassem.py: One of the files that must be modified if
      Include/opcode.h is changed.
    - pycodegen.py: One of the files that must be modified if
      Include/opcode.h is changed.

Known Compiler-related Experiments

This section lists known experiments involving the compiler (including
bytecode).

Skip Montanaro presented a paper at a Python workshop on a peephole
optimizer [1].

Michael Hudson has a non-active SourceForge project named Bytecodehacks
[2] that provides functionality for playing with bytecode directly.

An opcode to combine the functionality of LOAD_ATTR/CALL_FUNCTION was
created, named CALL_ATTR [3]. It currently only works for classic
classes; for new-style classes, rough benchmarking showed an actual
slowdown, thanks to having to support both classic and new-style
classes.

References

Aho86
    Alfred V. Aho, Ravi Sethi, Jeffrey D. Ullman.
    Compilers: Principles, Techniques, and Tools.
    http://www.amazon.com/exec/obidos/tg/detail/-/0201100886/104-0162389-6419108

Wang97
    Daniel C. Wang, Andrew W. Appel, Jeff L. Korn, and Chris S. Serra.
    The Zephyr Abstract Syntax Description Language. In Proceedings of
    the Conference on Domain-Specific Languages, pp. 213--227, 1997.

[1] Skip Montanaro's Peephole Optimizer Paper
(https://legacy.python.org/workshops/1998-11/proceedings/papers/montanaro/montanaro.html)
[2] Bytecodehacks Project
(http://bytecodehacks.sourceforge.net/bch-docs/bch/index.html)
[3] CALL_ATTR opcode (https://bugs.python.org/issue709744)


PEP: 381
Title: Mirroring infrastructure for PyPI
Author: Tarek Ziadé <tarek@ziade.org>, Martin von Löwis <martin@v.loewis.de>
Status: Withdrawn
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 21-Mar-2009
Post-History:

Abstract

This PEP describes a mirroring infrastructure for PyPI.

PEP Withdrawal

The main PyPI web service was moved behind the Fastly caching CDN in May
2013:
https://mail.python.org/pipermail/distutils-sig/2013-May/020848.html

Subsequently, this arrangement was formalised as an in-kind sponsorship
with the PSF, and the PSF has also taken on the task of risk management
in the event that that sponsorship arrangement were to ever cease.

The download statistics that were previously provided directly on PyPI
are now published indirectly via Google Big Query:
https://packaging.python.org/guides/analyzing-pypi-package-downloads/

Accordingly, the mirroring proposal described in this PEP is no longer
required, and has been marked as Withdrawn.

Rationale

PyPI is hosting over 6000 projects and is used on a daily basis by
people to build applications. Especially systems like easy_install and
zc.buildout make intensive use of PyPI.

For people making intensive use of PyPI, it can act as a single point of
failure. People have started to set up some mirrors, both private and
public. Those mirrors are active mirrors, which means that they are
browsing PyPI to get synced.

In order to make the system more reliable, this PEP describes:

- the mirror listing and registering at PyPI
- the pages a public mirror should maintain. These pages will be used
  by PyPI, in order to get hit counts and the last modified date.
- how a mirror should synchronize with PyPI
- how a client can implement a fail-over mechanism

Mirror listing and registering

People who want to mirror PyPI make a proposal on catalog-SIG. When a
mirror is proposed on the mailing list, it is manually added to a mirror
list in the PyPI application after it has been checked to be compliant
with the mirroring rules.

The mirror list is provided as a list of host names of the form

    X.pypi.python.org

The values of X are the sequence a,b,c,...,aa,ab,... a.pypi.python.org
is the master server; the mirrors start with b. A CNAME record
last.pypi.python.org points to the last host name. Mirror operators
should use a static address, and report planned changes to that address
in advance to distutils-sig.

The new mirror also appears at http://pypi.python.org/mirrors, which is
a human-readable page that gives the list of mirrors. This page also
explains how to register a new mirror.

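Given that naming scheme, a client can enumerate all mirrors by
generating host names in sequence until it reaches the host that
last.pypi.python.org resolves to; a sketch (network access assumed):

    import itertools
    import socket
    import string

    def suffixes():
        """Yield 'a', 'b', ..., 'z', 'aa', 'ab', ... indefinitely."""
        for length in itertools.count(1):
            for letters in itertools.product(string.ascii_lowercase,
                                             repeat=length):
                yield "".join(letters)

    def mirrors():
        """Yield mirror host names, stopping at the last registered one."""
        last = socket.gethostbyname_ex("last.pypi.python.org")[0]
        for suffix in suffixes():
            host = suffix + ".pypi.python.org"
            if suffix != "a":    # a.pypi.python.org is the master
                yield host
            if host == last:
                return
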
Statistics page

PyPI provides statistics on downloads at /stats. This page is calculated
daily by PyPI, by reading all mirrors' local stats and summing them.

The stats are presented in daily or monthly files, under /stats/days and
/stats/months. Each file is a bzip2 file with these formats:

- YYYY-MM-DD.bz2 for daily files
- YYYY-MM.bz2 for monthly files

Examples:

- /stats/days/2008-11-06.bz2
- /stats/days/2008-11-07.bz2
- /stats/days/2008-11-08.bz2
- /stats/months/2008-11.bz2
- /stats/months/2008-10.bz2

Mirror Authenticity

With a distributed mirroring system, clients may want to verify that the
mirrored copies are authentic. There are multiple threats to consider:

1. the central index may get compromised
2. the central index is assumed to be trusted, but the mirrors might be
   tampered with.
3. a man in the middle between the central index and the end user, or
   between a mirror and the end user, might tamper with datagrams.

This specification only deals with the second threat. Some provisions
are made to detect man-in-the-middle attacks. To detect the first
attack, package authors need to sign their packages using PGP keys, so
that users verify that the package comes from the author they trust.

The central index provides a DSA key at the URL /serverkey, in the PEM
format as generated by "openssl dsa -pubout" (i.e. RFC 3280
SubjectPublicKeyInfo, with the algorithm 1.3.14.3.2.12). This URL must
not be mirrored, and clients must fetch the official serverkey from PyPI
directly, or use the copy that came with the PyPI client software.
Mirrors should still download the key, to detect a key rollover.

For each package, a mirrored signature is provided at
/serversig/<package>. This is the DSA signature of the parallel URL
/simple/<package>, in DER form, using SHA-1 with DSA (i.e. as an RFC
3279 Dsa-Sig-Value, created by algorithm 1.2.840.10040.4.3).

Clients using a mirror need to perform the following steps to verify a
package:

1. download the /simple page, and compute its SHA-1 hash
2. download the corresponding /serversig
3. using the server key, verify that this signature matches the hash
   computed in step 1 (DSA signing is randomized, so the signature must
   be verified rather than recomputed and compared byte-for-byte)
4. compute and verify (against the /simple page) the MD5 hashes of all
   files they download from the mirror.

An implementation of the verification algorithm is available from
https://svn.python.org/packages/trunk/pypi/tools/verify.py

Verification is not needed when downloading from the central index, and
should be avoided to reduce the computation overhead.

About once a year, the key will be replaced with a new one. Mirrors will
have to re-fetch all /serversig pages. Clients using mirrors need to
find a trusted copy of the new server key. One way to obtain one is to
download it from https://pypi.python.org/serverkey.
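In code, those verification steps might look roughly like this, assuming
the third-party cryptography package (the function name and URL layout
are illustrative only):

    from urllib.request import urlopen

    from cryptography.hazmat.primitives import hashes, serialization

    def verify_package(mirror, package, serverkey_pem):
        """Check a mirror's /simple page against its mirrored signature."""
        key = serialization.load_pem_public_key(serverkey_pem)
        simple = urlopen("%s/simple/%s/" % (mirror, package)).read()
        sig = urlopen("%s/serversig/%s" % (mirror, package)).read()
        # Raises cryptography.exceptions.InvalidSignature if the page
        # was tampered with; the scheme is SHA-1 with DSA in DER form.
        key.verify(sig, simple, hashes.SHA1())
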
To detect man-in-the-middle attacks, clients need to verify the SSL
server certificate, which will be signed by the CACert authority.

Special pages a mirror needs to provide

A mirror is a subset copy of PyPI, so it provides the same structure by
copying it.

- simple: REST version of the package index
- packages: packages, stored by Python version and letters
- serversig: signatures for the simple pages

It also needs to provide two specific elements:

- last-modified
- local-stats

Last modified date

CPAN uses a freshness date system where the mirror's last
synchronisation date is made available.

For PyPI, each mirror needs to maintain a URL with simple text content
that represents the last synchronisation date the mirror maintains.

The date is provided in GMT time, using the ISO 8601 format[1]. Each
mirror will be responsible for maintaining its last modified date.

This page must be located at /last-modified and must be a text/plain
page.

Local statistics

Each mirror is responsible for counting all the downloads that were done
via it. This is used by PyPI to sum up all downloads, to be able to
display the grand total.

These statistics are in CSV-like form, with a header in the first line.
It needs to obey PEP 305. Basically, it should be readable by Python's
csv module.

The fields in this file are:

- package: the distutils id of the package.
- filename: the filename that has been downloaded.
- useragent: the User-Agent of the client that has downloaded the
  package.
- count: the number of downloads.

The content will look like this:

    # package,filename,useragent,count
    zc.buildout,zc.buildout-1.6.0.tgz,MyAgent,142
    ...

The counting starts the day the mirror is launched, and there is one
file per day, compressed using the bzip2 format. Each file is named
after the day. For example, 2008-11-06.bz2 is the file for the 6th of
November 2008.

They are then provided in a folder called days. For example:

- /local-stats/days/2008-11-06.bz2
- /local-stats/days/2008-11-07.bz2
- /local-stats/days/2008-11-08.bz2

This page must be located at /local-stats.

How a mirror should synchronize with PyPI

A mirroring protocol called Simple Index was described and implemented
by Martin v. Loewis and Jim Fulton, based on how easy_install works.
This section synthesizes it and gives a few relevant links, plus a small
part about User-Agent.

The mirroring protocol

Mirrors must reduce the amount of data transferred between the central
server and the mirror. To achieve that, they MUST use the changelog()
PyPI XML-RPC call, and only refetch the packages that have been changed
since the last time. For each package P, they MUST copy the documents
/simple/P/ and /serversig/P. If a package is deleted on the central
server, they MUST delete the package and all associated files.
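A minimal sketch of such a synchronization loop, assuming XML-RPC access
to the central index (the serial handling, action strings, and the
fetch/delete steps are illustrative):

    import time
    import xmlrpc.client

    client = xmlrpc.client.ServerProxy("https://pypi.python.org/pypi")
    last_sync = int(time.time()) - 24 * 3600   # e.g. resume from yesterday

    for name, version, timestamp, action in client.changelog(last_sync):
        if "remove" in action:
            pass   # delete /simple/<name>/, /serversig/<name>, and files
        else:
            pass   # re-fetch /simple/<name>/ and /serversig/<name>
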
To detect modification of package files, mirrors MAY cache the file's
ETag, and MAY request skipping it using the If-None-Match header.

Each mirroring tool MUST identify itself using a descriptive User-agent
header.

The pep381client package[2] provides an application that respects this
protocol to browse PyPI.

User-agent request header

In order to be able to differentiate actions taken by clients over PyPI,
a specific user agent name should be provided by all mirroring software.

This is also true for all clients like:

- zc.buildout[3].
- setuptools[4].
- pip[5].

XXX user agent registering mechanism at PyPI?

How a client can use PyPI and its mirrors

Clients that are browsing PyPI should be able to use alternative
mirrors, by getting the list of the mirrors using last.pypi.python.org.

Code example:

    >>> import socket
    >>> socket.gethostbyname_ex('last.pypi.python.org')[0]
    'h.pypi.python.org'

The clients so far that could use this mechanism:

- setuptools
- zc.buildout (through setuptools)
- pip

Fail-over mechanism

Clients that are browsing PyPI should be able to use a fail-over
mechanism when PyPI or the used mirror is not responding.

It is up to the client to decide which mirror should be used, maybe by
looking at its geographical location and its responsiveness.

This PEP does not describe how this fail-over mechanism should work, but
it is strongly encouraged that the clients try to use the nearest
mirror.

The clients so far that could use this mechanism:

- setuptools
- zc.buildout (through setuptools)
- pip

Extra package indexes

It is obvious that some packages will not be uploaded to PyPI, whether
because they are private or because the project maintainer runs their
own server where people might get the project package. However, it is
strongly encouraged that a public package index follow the PyPI and
Distutils protocols.

In other words, the register and upload commands should be compatible
with any package index server out there.

Software compatible with PyPI and Distutils so far:

- PloneSoftwareCenter[6], which is used to run the plone.org products
  section.
- EggBasket[7].

An extra package index is not a mirror of PyPI, but can have some
mirrors itself.

Merging several indexes

When a client needs to get some packages from several distinct indexes,
it should be able to use each one of them as a potential source of
packages.
Different indexes should be defined as a sorted list for the
client to look for a package.

Each independent index can of course provide a list of its mirrors.

XXX define how to get the hostname for the mirrors of an arbitrary
index.

That permits all combinations at client level, for a reliable packaging
system with all levels of privacy.

It is up to the client to deal with the merging.

References

Acknowledgments

Georg Brandl.

Copyright

This document has been placed in the public domain.

[1] http://en.wikipedia.org/wiki/ISO_8601
[2] http://pypi.python.org/pypi/pep381client
[3] http://pypi.python.org/pypi/zc.buildout
[4] http://pypi.python.org/pypi/setuptools
[5] http://pypi.python.org/pypi/pip
[6] http://plone.org/products/plonesoftwarecenter
[7] http://www.chrisarndt.de/projects/eggbasket


PEP: 698
Title: Override Decorator for Static Typing
Author: Steven Troxler <steven.troxler@gmail.com>, Joshua Xu
<jxu425@fb.com>, Shannon Zhu <szhu@fb.com>
Sponsor: Jelle Zijlstra <jelle.zijlstra at gmail.com>
Discussions-To: https://discuss.python.org/t/pep-698-a-typing-override-decorator/20839
Status: Final
Type: Standards Track
Topic: Typing
Created: 05-Sep-2022
Python-Version: 3.12
Post-History: 20-May-2022, 17-Aug-2022, 11-Oct-2022, 07-Nov-2022
Resolution: https://discuss.python.org/t/pep-698-a-typing-override-decorator/20839/11

Abstract

This PEP proposes adding an @override decorator to the Python type
system. This will allow type checkers to prevent a class of bugs that
occur when a base class changes methods that are inherited by derived
classes.

Motivation

A primary purpose of type checkers is to flag when refactors or changes
break pre-existing semantic structures in the code, so users can
identify and make fixes across their project without doing a manual
audit of their code.

Safe Refactoring

Python's type system does not provide a way to identify call sites that
need to be changed to stay consistent when an overridden function API
changes. This makes refactoring and transforming code more dangerous.

Consider this simple inheritance structure:

    class Parent:
        def foo(self, x: int) -> int:
            return x

    class Child(Parent):
        def foo(self, x: int) -> int:
            return x + 1

    def parent_callsite(parent: Parent) -> None:
        parent.foo(1)

    def child_callsite(child: Child) -> None:
        child.foo(1)

If the overridden method on the superclass is renamed or deleted, type
checkers will only alert us to update call sites that deal with the base
type directly.
But the type checker can only see the new code, not the
change we made, so it has no way of knowing that we probably also needed
to rename the same method on child classes.

A type checker will happily accept this code, even though we are likely
introducing bugs:

    class Parent:
        # Rename this method
        def new_foo(self, x: int) -> int:
            return x

    class Child(Parent):
        # This (unchanged) method used to override `foo` but is
        # unrelated to `new_foo`
        def foo(self, x: int) -> int:
            return x + 1

    def parent_callsite(parent: Parent) -> None:
        # If we pass a Child instance we'll now run Parent.new_foo -
        # likely a bug
        parent.new_foo(1)

    def child_callsite(child: Child) -> None:
        # We probably wanted to invoke new_foo here. Instead, we forked
        # the method
        child.foo(1)

This code will type check, but there are two potential sources of bugs:

- If we pass a Child instance to the parent_callsite function, it will
  invoke the implementation in Parent.new_foo rather than Child.foo.
  This is probably a bug - we presumably would not have written
  Child.foo in the first place if we didn't need custom behavior.
- Our system was likely relying on Child.foo behaving in a similar way
  to Parent.foo. But unless we catch this early, we have now forked the
  methods, and in future refactors it is likely no one will realize
  that major changes to the behavior of new_foo likely require updating
  Child.foo as well, which could lead to major bugs later.

The incorrectly-refactored code is type-safe, but is probably not what
we intended and could cause our system to behave incorrectly. The bug
can be difficult to track down because our new code likely does execute
without throwing exceptions. Tests are less likely to catch the problem,
and silent errors can take longer to track down in production.

We are aware of several production outages in multiple typed codebases
caused by such incorrect refactors. This is our primary motivation for
adding an @override decorator to the type system, which lets developers
express the relationship between Parent.foo and Child.foo so that type
checkers can detect the problem.

Rationale

Subclass Implementations Become More Explicit

We believe that explicit overrides will make unfamiliar code easier to
read than implicit overrides. A developer reading the implementation of
a subclass that uses @override can immediately see which methods are
overriding functionality in some base class; without this decorator, the
only way to quickly find out is using a static analysis tool.

Precedent in Other Languages and Runtime Libraries

Static Override Checks in Other Languages

Many popular programming languages support override checks. For example:

- C++ has override.
- C# has override.
- Hack has <<__Override>>.
- Java has @Override.
- Kotlin has override.
- Scala has override.
- Swift has override.
- TypeScript has override.

Runtime Override Checks in Python

Today, there is an Overrides library that provides decorators @overrides
[sic] and @final and will enforce them at runtime.

PEP 591 added a @final decorator with the same semantics as those in the
Overrides library.
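For reference, runtime enforcement with that library looks roughly like
this (a sketch assuming the third-party overrides package):

    from overrides import overrides

    class Parent:
        def foo(self) -> int:
            return 1

    class Child(Parent):
        @overrides
        def foo(self) -> int:  # checked when the class body executes
            return 2
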
But the override component of the runtime library is
not supported statically at all, which has added some confusion around
the mixed and matched support.

Providing support for @override in static checks would add value
because:

- Bugs can be caught earlier, often in-editor.
- Static checks come with no performance overhead, unlike runtime
  checks.
- Bugs will be caught quickly even in rarely-used modules, whereas with
  runtime checks these might go undetected for a time without automated
  tests of all imports.

Disadvantages

Using @override will make code more verbose.

Specification

When type checkers encounter a method decorated with @typing.override
they should treat it as a type error unless that method is overriding a
compatible method or attribute in some ancestor class.

    from typing import override

    class Parent:
        def foo(self) -> int:
            return 1

        def bar(self, x: str) -> str:
            return x

    class Child(Parent):
        @override
        def foo(self) -> int:
            return 2

        @override
        def baz(self) -> int:  # Type check error: no matching
            return 1           # signature in ancestor

The @override decorator should be permitted anywhere a type checker
considers a method to be a valid override, which typically includes not
only normal methods but also @property, @staticmethod, and @classmethod.

No New Rules for Override Compatibility

This PEP is exclusively concerned with the handling of the new @override
decorator, which specifies that the decorated method must override some
attribute in an ancestor class. This PEP does not propose any new rules
regarding the type signatures of such methods.

Strict Enforcement Per-Project

We believe that @override is most useful if checkers also allow
developers to opt into a strict mode where methods that override a
parent class are required to use the decorator. Strict enforcement
should be opt-in for backward compatibility.
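Under such a strict mode, a checker would flag any override that lacks
the decorator; for example (the error wording is illustrative):

    from typing import override

    class Base:
        def process(self) -> None: ...
        def close(self) -> None: ...

    class Impl(Base):
        @override
        def close(self) -> None: ...    # fine: explicitly marked

        def process(self) -> None: ...  # strict-mode error: overrides
                                        # Base.process without @override
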
Strict enforcement\nshould be opt-in for backward compatibility.\n\nMotivation\n\nThe primary reason for a strict mode that requires @override is that\ndevelopers can only trust that refactors are override-safe if they know\nthat the @override decorator is used throughout the project.\n\nThere is another class of bug related to overrides that we can only\ncatch using a strict mode.\n\nConsider the following code:\n\n class Parent:\n pass\n\n class Child(Parent):\n def foo(self) -> int:\n return 2\n\nImagine we refactor it as follows:\n\n class Parent:\n def foo(self) -> int: # This method is new\n return 1\n\n class Child(Parent):\n def foo(self) -> int: # This is now an override!\n return 2\n\n def call_foo(parent: Parent) -> int:\n return parent.foo() # This could invoke Child.foo, which may be surprising.\n\nThe semantics of our code changed here, which could cause two problems:\n\n- If the author of the code change did not know that Child.foo already\n existed (which is very possible in a large codebase), they might be\n surprised to see that call_foo does not always invoke Parent.foo.\n- If the codebase authors tried to manually apply @override everywhere\n when writing overrides in subclasses, they are likely to miss the\n fact that Child.foo needs it here.\n\nAt first glance this kind of change may seem unlikely, but it can\nactually happen often if one or more subclasses have functionality that\ndevelopers later realize belongs in the base class.\n\nWith a strict mode, we will always alert developers when this occurs.\n\nPrecedent\n\nMost of the typed, object-oriented programming languages we looked at\nhave an easy way to require explicit overrides throughout a project:\n\n- C#, Kotlin, Scala, and Swift always require explicit overrides\n- TypeScript has a --no-implicit-override flag to force explicit\n overrides\n- In Hack and Java the type checker always treats overrides as opt-in,\n but widely-used linters can warn if explicit overrides are missing.\n\nBackward Compatibility\n\nBy default, the @override decorator will be opt-in. Codebases that do\nnot use it will type-check as before, without the additional type\nsafety.\n\nRuntime Behavior\n\nSet __override__ = True when possible\n\nAt runtime, @typing.override will make a best-effort attempt to add an\nattribute __override__ with value True to its argument. By \"best-effort\"\nwe mean that we will try adding the attribute, but if that fails (for\nexample because the input is a descriptor type with fixed slots) we will\nsilently return the argument as-is.\n\nThis is exactly what the @typing.final decorator does, and the\nmotivation is similar: it gives runtime libraries the ability to use\n@override. As a concrete example, a runtime library could check\n__override__ in order to automatically populate the __doc__ attribute of\nchild class methods using the parent method docstring.\n\nLimitations of setting __override__\n\nAs described above, adding __override__ may fail at runtime, in which\ncase we will simply return the argument as-is.\n\nIn addition, even in cases where it does work, it may be difficult for\nusers to correctly work with multiple decorators, because successfully\nensuring the __override__ attribute is set on the final output requires\nunderstanding the implementation of each decorator:\n\n- The @override decorator needs to execute after ordinary decorators\n like @functools.lru_cache that use wrapper functions, since we want\n to set __override__ on the outermost wrapper. 
  This means it needs to go above all these other decorators.
- But @override needs to execute before many special descriptor-based
  decorators like @property, @staticmethod, and @classmethod.
- As discussed above, in some cases (for example a descriptor with
  fixed slots or a descriptor that also wraps) it may be impossible to
  set the __override__ attribute at all.

As a result, runtime support for setting __override__ is best effort
only, and we do not expect type checkers to validate the ordering of
decorators.

Rejected Alternatives

Rely on Integrated Development Environments for safety

Modern Integrated Development Environments (IDEs) often provide the
ability to automatically update subclasses when renaming a method. But
we view this as insufficient for several reasons:

- If a codebase is split into multiple projects, an IDE will not help
  and the bug appears when upgrading dependencies. Type checkers are a
  fast way to catch breaking changes in dependencies.
- Not all developers use such IDEs. And library maintainers, even if
  they do use an IDE, should not need to assume pull request authors
  use the same IDE. We prefer being able to detect problems in
  continuous integration without assuming anything about developers'
  choice of editor.

Runtime enforcement

We considered having @typing.override enforce override safety at
runtime, similarly to how @overrides.overrides does today.

We rejected this for four reasons:

- For users of static type checking, it is not clear this brings any
  benefits.
- There would be at least some performance overhead, leading to
  projects importing slower with runtime enforcement. We estimate the
  @overrides.overrides implementation takes around 100 microseconds,
  which is fast but could still add up to a second or more of extra
  initialization time in million-plus line codebases, which is exactly
  where we think @typing.override will be most useful.
- An implementation may have edge cases where it doesn't work well (we
  heard from a maintainer of one such closed-source library that this
  has been a problem). We expect static enforcement to be simple and
  reliable.
- The implementation approaches we know of are not simple. The
  decorator executes before the class is finished evaluating, so the
  options we know of are either to inspect the bytecode of the caller
  (as @overrides.overrides does) or to use a metaclass-based approach.
  Neither approach seems ideal.

Mark a base class to force explicit overrides on subclasses

We considered including a class decorator @require_explicit_overrides,
which would have provided a way for base classes to declare that all
subclasses must use the @override decorator on method overrides. The
Overrides library has a mixin class, EnforceExplicitOverrides, which
provides similar behavior in runtime checks.

We decided against this because we expect owners of large codebases will
benefit most from @override, and for these use cases having a strict
mode where explicit @override is required (see the Backward
Compatibility section) provides more benefits than a way to mark base
classes.

Moreover we believe that authors of projects who do not consider the
extra type safety to be worth the additional boilerplate of using
@override should not be forced to do so.
Having an optional strict mode
puts the decision in the hands of project owners, whereas the use of
@require_explicit_overrides in libraries would force project owners to
use @override even if they prefer not to.

Include the name of the ancestor class being overridden

We considered allowing the caller of @override to specify a specific
ancestor class where the overridden method should be defined:

    class Parent0:
        def foo(self) -> int:
            return 1


    class Parent1:
        def bar(self) -> int:
            return 1


    class Child(Parent0, Parent1):
        @override(Parent0)  # okay, Parent0 defines foo
        def foo(self) -> int:
            return 2

        @override(Parent0)  # type error, Parent0 does not define bar
        def bar(self) -> int:
            return 2

This could be useful for code readability because it makes the override
structure more explicit for deep inheritance trees. It also might catch
bugs by prompting developers to check that the implementation of an
override still makes sense whenever a method being overridden moves from
one base class to another.

We decided against it because:

- Supporting this would add complexity to the implementation of both
  @override and type checker support for it, so there would need to be
  considerable benefits.
- We believe that it would be rarely used and catch relatively few
  bugs.
  - The author of the Overrides package has noted that early versions
    of his library included this capability but it was rarely useful
    and seemed to have little benefit. After it was removed, the
    ability was never requested by users.

Reference Implementation

A proof of concept is implemented in Pyre:

- The decorator @pyre_extensions.override can mark overrides
- Pyre can type-check this decorator as specified in this PEP

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.


PEP: 439
Title: Inclusion of implicit pip bootstrap in Python installation
Version: $Revision$
Last-Modified: $Date$
Author: Richard Jones <richard@python.org>
BDFL-Delegate: Alyssa Coghlan <ncoghlan@gmail.com>
Discussions-To: distutils-sig@python.org
Status: Rejected
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 18-Mar-2013
Python-Version: 3.4
Post-History: 19-Mar-2013
Resolution: https://mail.python.org/pipermail/distutils-sig/2013-August/022527.html

Abstract

This PEP proposes the inclusion of a pip bootstrap executable in the
Python installation to simplify the use of 3rd-party modules by Python
users.

This PEP does not propose to include the pip implementation in the
Python standard library. Nor does it propose to implement any package
management or installation mechanisms beyond those provided by PEP 427
("The Wheel Binary Package Format 1.0") and TODO distlib PEP.

PEP Rejection

This PEP has been rejected in favour of a more explicit mechanism that
should achieve the same end result in a more reliable fashion.
The more
explicit bootstrapping mechanism is described in PEP 453.

Rationale

Currently the user story for installing 3rd-party Python modules is not
as simple as it could be. It requires that all 3rd-party modules inform
the user of how to install the installer, typically via a link to the
installer. That link may be out of date, or the steps required to
perform the install of the installer may be enough of a roadblock to
prevent the user from further progress.

Large Python projects which emphasise a low barrier to entry have shied
away from depending on third party packages because of the introduction
of this potential stumbling block for new users.

With the inclusion of the package installer command in the standard
Python installation, the barrier to installing additional software is
considerably reduced. It is hoped that this will therefore increase the
likelihood that Python projects will reuse third party software.

The Python community also has an issue of complexity around the current
bootstrap procedure for pip and setuptools. They each have their own
bootstrap download file with slightly different usages and even refer to
each other in some cases. Having a single bootstrap which is common
amongst them all, with a simple usage, would be far preferable.

It is also hoped that this reduces the number of proposals to include
more and more software in the Python standard library, and therefore
that more popular Python software is more easily upgradeable beyond
requiring Python installation upgrades.

Proposal

The bootstrap will install the pip implementation and setuptools by
downloading their installation files from PyPI.

This proposal affects two components of packaging: the pip bootstrap
and, thanks to easier package installation, modifications to publishing
packages.

The core of this proposal is that the user experience of using pip
should not require the user to install pip.

The pip bootstrap

The Python installation includes an executable called "pip3" (see PEP
394 for naming rationale etc.) that attempts to import pip machinery. If
it can then the pip command proceeds as normal. If it cannot, it will
bootstrap pip by downloading the pip implementation and setuptools wheel
files. Hereafter the installation of the "pip implementation" will imply
installation of setuptools and virtualenv. Once installed, the pip
command proceeds as normal. Once the bootstrap process is complete the
"pip3" command is no longer the bootstrap but rather the full pip
command.

A bootstrap is used in place of the full pip code so that we don't have
to bundle pip, and also so that pip is upgradeable outside of the
regular Python upgrade timeframe and processes.

To avoid issues with sudo, we will have the bootstrap default to
installing the pip implementation to the per-user site-packages
directory defined in PEP 370 and implemented in Python 2.6/3.0. Since we
avoid installing to the system Python, we also avoid conflicting with
any other packaging system (on Linux systems, for example). If the user
is inside a PEP 405 virtual environment, then the pip implementation
will be installed into that virtual environment.
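In rough Python terms, the control flow of the bootstrap amounts to the
following (an illustrative sketch only: bootstrap() stands in for the
download-and-install machinery, and the pip entry point is simplified,
using the main() function older pip releases exposed); the numbered
steps below spell this out in detail:

    import sys

    def bootstrap(target="user"):
        """Stand-in for the real work: fetch the pip and setuptools
        wheel files from PyPI over verified HTTPS and install them."""
        raise NotImplementedError("illustrative only")

    def pip3(args):
        try:
            import pip                # the full implementation, if present
        except ImportError:
            bootstrap(target="user")  # default to per-user site-packages
            import pip                # now importable
        return pip.main(args)         # hand the command off to real pip

    if __name__ == "__main__":
        sys.exit(pip3(sys.argv[1:]))
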
3. The bootstrap script will attempt to import the pip implementation.
   If this succeeds, the pip command is processed normally. Stop.
4. On failing to import the pip implementation the bootstrap notifies
   the user that it needs to "install pip". It will ask the user
   whether it should install pip into the system-wide site-packages or
   as a user-only package. This choice will also be present as a
   command-line option to pip so non-interactive use is possible.
5. The bootstrap will then contact PyPI to obtain the latest download
   wheel file (see PEP 427).
6. Upon downloading the file it is installed using "python setup.py
   install".
7. The pip tool may now import the pip implementation and continues to
   process the requested user command normally.
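To make the control flow concrete, here is a minimal sketch of what
such a bootstrap script could look like. It is illustrative only: the
helper _download_and_install_pip() and the exact option handling are
hypothetical, not part of this proposal.

    #!/usr/bin/env python3
    # Illustrative sketch of the "pip3" bootstrap (hypothetical).
    import subprocess
    import sys

    def _download_and_install_pip(user=True):
        # Hypothetical helper: securely fetch the latest pip and
        # setuptools wheels from PyPI over HTTPS and install them,
        # defaulting to the per-user site-packages directory (PEP 370)
        # to avoid needing sudo.
        raise NotImplementedError('fetch wheels from PyPI and install')

    def main(argv):
        try:
            import pip  # already bootstrapped?
        except ImportError:
            to_system = '--bootstrap-to-system' in argv
            _download_and_install_pip(user=not to_system)
        # The full pip implementation now handles the requested command.
        return subprocess.call([sys.executable, '-m', 'pip'] + argv)

    if __name__ == '__main__':
        sys.exit(main(sys.argv[1:]))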
Users may be running in an environment which cannot access the public
Internet and are relying solely on a local package repository. They
would use the "-i" (Base URL of Python Package Index) argument to the
"pip3 install" command. This simply overrides the default index URL
pointing to PyPI.

Some users may have no Internet access suitable for fetching the pip
implementation file. These users can manually download and install the
setuptools and pip tar files. Adding specific support for this
use-case is unnecessary.

The download of the pip implementation install file will be performed
securely. The transport from pypi.python.org will be done over HTTPS
with the CA certificate check performed. This facility will be present
in Python 3.4+ using Operating System certificates (see PEP XXXX).

Beyond those arguments controlling index location and download
options, the "pip3" bootstrap command may support further standard pip
options for verbosity, quietness and logging.

The "pip3" command will support two new command-line options that are
used in the bootstrapping, and otherwise ignored. They control where
the pip implementation is installed:

--bootstrap

    Install to the user's packages directory. The name of this option
    is chosen to promote it as the preferred installation option.

--bootstrap-to-system

    Install to the system site-packages directory.

These command-line options will also need to be implemented, but
otherwise ignored, in the pip implementation.

Consideration should be given to defaulting pip to install packages to
the user's packages directory if pip is installed in that location.

The "--no-install" option to the "pip3" command will not affect the
bootstrapping process.

Modifications to publishing packages

An additional new Python package is proposed, "pypublish", which will
be a tool for publishing packages to PyPI. It would replace the
current "python setup.py register" and "python setup.py upload"
distutils commands. Again because of the measured Python release cycle
and extensive existing Python installations these commands are
difficult to bugfix and extend. Additionally it is desired that the
"register" and "upload" commands be able to be performed over HTTPS
with certificate validation. Since shipping CA certificate keychains
with Python is not really feasible (updating the keychain is quite
difficult to manage), it is desirable that those commands, and the
accompanying keychain, be made installable and upgradeable outside of
Python itself.

The existing distutils mechanisms for package registration and upload
would remain, though with a deprecation warning.

Implementation

The changes to pip required by this PEP are being tracked in that
project's issue tracker[1]. Most notably, the addition of --bootstrap
and --bootstrap-to-system to the pip command-line.

It would be preferable that the pip and setuptools projects distribute
a wheel format download.

The required code for this implementation is the "pip3" command
described above. The additional pypublish can be developed outside of
the scope of this PEP's work.

Finally, it would be desirable that "pip3" be ported to Python 2.6+ to
allow the single command to replace existing pip, setuptools and
virtualenv (which would be added to the bootstrap) bootstrap scripts.
Having that bootstrap included in a future Python 2.7 release would
also be highly desirable.

Risks

The key that is used to sign the pip implementation download might be
compromised and this PEP currently proposes no mechanism for key
revocation.

There is a Perl package installer also named "pip". It is quite rare
and not commonly used. The Fedora variant of Linux has historically
named Python's "pip" as "python-pip" and Perl's "pip" as "perl-pip".
This policy has been altered[2] so that future and upgraded Fedora
installations will use the name "pip" for Python's "pip". Existing
(non-upgraded) installations will still have the old name for the
Python "pip", though the potential for confusion is now much reduced.

References

Acknowledgments

Alyssa Coghlan for her thoughts on the proposal and dealing with the
Red Hat issue.

Jannis Leidel and Carl Meyer for their thoughts.
Marcus Smith for feedback.

Marcela Mašláňová for resolving the Fedora issue.

Copyright

This document has been placed in the public domain.

[1] pip issue tracking work needed for this PEP
https://github.com/pypa/pip/issues/863

[2] Fedora's python-pip package does not provide /usr/bin/pip
https://bugzilla.redhat.com/show_bug.cgi?id=958377


PEP: 404
Title: Python 2.8 Un-release Schedule
Author: Barry Warsaw
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 09-Nov-2011
Python-Version: 2.8

Abstract

This document describes the un-development and un-release schedule for
Python 2.8.

Un-release Manager and Crew

  Position                 Name
  ------------------------ ------------------
  2.8 Un-release Manager   Cardinal Biggles

Un-release Schedule

The current un-schedule is:

- 2.8 final Never

Official pronouncement

Rule number six: there is no official Python 2.8 release. There never
will be an official Python 2.8 release. It is an ex-release. Python
2.7 is the end of the Python 2 line of development.

Upgrade path

The official upgrade path from Python 2.7 is to Python 3.

And Now For Something Completely Different

In all seriousness, there are important reasons why there won't be an
official Python 2.8 release, and why you should plan to migrate
instead to Python 3.

Python is (as of this writing) more than 20 years old, and Guido and
the community have learned a lot in those intervening years. Guido's
original concept for Python 3 was to make changes to the language
primarily to remove the warts that had grown in the preceding
versions. Python 3 was not to be a complete redesign, but instead an
evolution of the language, and while maintaining full backward
compatibility with Python 2 was explicitly off-the-table, neither were
gratuitous changes in syntax or semantics acceptable. In most cases,
Python 2 code can be translated fairly easily to Python 3, sometimes
entirely mechanically by such tools as 2to3 (there's also a
non-trivial subset of the language that will run without modification
on both 2.7 and 3.x).

Because maintaining multiple versions of Python is a significant drag
on the resources of the Python developers, and because the
improvements to the language and libraries embodied in Python 3 are so
important, it was decided to end the Python 2 lineage with Python 2.7.
Thus, all new development occurs in the Python 3 line of development,
and there will never be an official Python 2.8 release. Python 2.7
will however be maintained for longer than the usual period of time.

Here are some highlights of the significant improvements in Python 3.
You can read in more detail on the differences between Python 2 and
Python 3. There are also many good guides on porting from Python 2 to
Python 3.

Strings and bytes

Python 2's basic original strings are called 8-bit strings, and they
play a dual role in Python 2 as both ASCII text and as byte sequences.
While Python 2 also has a unicode string type, the fundamental
ambiguity of the core string type, coupled with Python 2's default
behavior of supporting automatic coercion from 8-bit strings to
unicode objects when the two are combined, often leads to
UnicodeErrors. Python 3's standard string type is Unicode based, and
Python 3 adds a dedicated bytes type, but critically, no automatic
coercion between bytes and unicode strings is provided. The closest
the language gets to implicit coercion are a few text-based APIs that
assume a default encoding (usually UTF-8) if no encoding is explicitly
stated. Thus, the core interpreter, its I/O libraries, module names,
etc. are clear in their distinction between unicode strings and bytes.
Python 3's unicode support even extends to the filesystem, so that
non-ASCII file names are natively supported.

This string/bytes clarity is often a source of difficulty in
transitioning existing code to Python 3, because many third party
libraries and applications are themselves ambiguous in this
distinction. Once migrated though, most UnicodeErrors can be
eliminated.
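A tiny snippet (illustrative, not from the PEP) shows the difference
in practice:

    # Python 2 silently coerced 8-bit strings to unicode, failing only
    # for some data; Python 3 refuses to mix the two types at all.
    try:
        b'caf\xc3\xa9' + '!'
    except TypeError as exc:
        print(exc)  # can't concat str to bytes

    # An explicit decode is required instead:
    print(b'caf\xc3\xa9'.decode('utf-8') + '!')  # café!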
Numbers

Python 2 has two basic integer types, a native machine-sized int type,
and an arbitrary length long type. These have been merged in Python 3
into a single int type analogous to Python 2's long type.

In addition, dividing two integers with / now always produces a
floating point result, while // remains available for floor division.

Classes

Python 2 has two core class hierarchies, often called classic classes
and new-style classes. The latter allow for such things as inheriting
from the builtin basic types, support descriptor based tools like the
property builtin and provide a generally more sane and coherent system
for dealing with multiple inheritance. Python 3 provided the
opportunity to completely drop support for classic classes, so all
classes in Python 3 automatically use the new-style semantics
(although that's a misnomer now). There is no need to explicitly
inherit from object or set the default metaclass to enable them (in
fact, setting a default metaclass at the module level is no longer
supported - the default metaclass is always object).

The mechanism for explicitly specifying a metaclass has also changed
to use a metaclass keyword argument in the class header line rather
than a __metaclass__ magic attribute in the class body.
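For example (an illustrative snippet, not part of the PEP):

    class Meta(type):
        # A trivial metaclass that stamps each class it creates.
        def __new__(mcls, name, bases, namespace):
            namespace['created_by'] = mcls.__name__
            return super().__new__(mcls, name, bases, namespace)

    # Python 2 spelling (no longer supported):
    #     class Widget(object):
    #         __metaclass__ = Meta
    # Python 3 spelling:
    class Widget(metaclass=Meta):
        pass

    print(Widget.created_by)  # prints: Meta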
Multiple spellings

There are many cases in Python 2 where multiple spellings of some
constructs exist, such as repr() and backticks, or the two inequality
operators != and <>. In all cases, Python 3 has chosen exactly one
spelling and removed the other (e.g. repr() and != were kept).

Imports

In Python 3, implicit relative imports within packages are no longer
available - only absolute imports and explicit relative imports are
supported. In addition, star imports (e.g. from x import *) are only
permitted in module level code.

Also, some areas of the standard library have been reorganized to make
the naming scheme more intuitive. Some rarely used builtins have been
relocated to standard library modules.

Iterators and views

Many APIs, which in Python 2 returned concrete lists, in Python 3 now
return iterators or lightweight views.

Copyright

This document has been placed in the public domain.


PEP: 554
Title: Multiple Interpreters in the Stdlib
Author: Eric Snow
Discussions-To: https://discuss.python.org/t/pep-554-multiple-interpreters-in-the-stdlib/24855
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Sep-2017
Python-Version: 3.13
Post-History: 07-Sep-2017, 08-Sep-2017, 13-Sep-2017, 05-Dec-2017,
              04-May-2020, 14-Mar-2023, 01-Nov-2023
Superseded-By: 734

Note

This PEP effectively continues in a cleaner form in PEP 734. This PEP
is kept as-is for the sake of the various sections of background
information and deferred/rejected ideas that have been stripped from
PEP 734.

Abstract

CPython has supported multiple interpreters in the same process (AKA
"subinterpreters") since version 1.5 (1997). The feature has been
available via the C-API. [c-api] Multiple interpreters operate in
relative isolation from one another, which facilitates novel
alternative approaches to concurrency.

This proposal introduces the stdlib interpreters module. It exposes
the basic functionality of multiple interpreters already provided by
the C-API, along with basic support for communicating between
interpreters. This module is especially relevant since PEP 684
introduced a per-interpreter GIL in Python 3.12.

Proposal

Summary:

- add a new stdlib module: "interpreters"
- add concurrent.futures.InterpreterPoolExecutor
- help for extension module maintainers

The "interpreters" Module

The interpreters module will provide a high-level interface to the
multiple interpreter functionality, and wrap a new low-level
_interpreters (in the same way as the threading module). See the
Examples section for concrete usage and use cases.

Along with exposing the existing (in CPython) multiple interpreter
support, the module will also support a basic mechanism for passing
data between interpreters. That involves setting "shareable" objects
in the __main__ module of a target subinterpreter. Some such objects,
like os.pipe(), may be used to communicate further. The module will
also provide a minimal implementation of "channels" as a demonstration
of cross-interpreter communication.

Note that objects are not shared between interpreters since they are
tied to the interpreter in which they were created. Instead, the
objects' data is passed between interpreters. See the Shared Data and
API For Communication sections for more details about
sharing/communicating between interpreters.

API summary for interpreters module

Here is a summary of the API for the interpreters module.
For a more in-depth explanation of the proposed classes and functions,
see the "interpreters" Module API section below.

For creating and using interpreters:

  -----------------------------------------------------------------------
  signature                      description
  ------------------------------ ----------------------------------------
  list_all() -> [Interpreter]    Get all existing interpreters.

  get_current() -> Interpreter   Get the currently running interpreter.

  get_main() -> Interpreter      Get the main interpreter.

  create() -> Interpreter        Initialize a new (idle) Python
                                 interpreter.
  -----------------------------------------------------------------------

  -----------------------------------------------------------------------
  signature                      description
  ------------------------------ ----------------------------------------
  class Interpreter              A single interpreter.

  .id                            The interpreter's ID (read-only).

  .is_running() -> bool          Is the interpreter currently executing
                                 code?

  .close()                       Finalize and destroy the interpreter.

  .set_main_attrs(**kwargs)      Bind "shareable" objects in __main__.

  .get_main_attr(name)           Get a "shareable" object from __main__.

  .exec(src_str, /)              Run the given source code in the
                                 interpreter (in the current thread).
  -----------------------------------------------------------------------

For communicating between interpreters:

  -----------------------------------------------------------------------
  signature                        description
  -------------------------------- --------------------------------------
  is_shareable(obj) -> bool        Can the object's data be passed
                                   between interpreters?

  create_channel()                 Create a new channel for passing
    -> (RecvChannel, SendChannel)  data between interpreters.
  -----------------------------------------------------------------------

concurrent.futures.InterpreterPoolExecutor

An executor will be added that extends ThreadPoolExecutor to run
per-thread tasks in subinterpreters. Initially, the only supported
tasks will be whatever Interpreter.exec() takes (e.g. a str script).
However, we may also support some functions, as well as eventually a
separate method for pickling the task and arguments, to reduce
friction (at the expense of performance for short-running tasks).
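As a usage sketch (hypothetical, since this PEP does not pin down the
executor's exact signature), submitting script tasks might look like
this:

    from concurrent.futures import InterpreterPoolExecutor  # proposed

    # Each worker thread would run its tasks in its own subinterpreter.
    with InterpreterPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit('print("hello from a subinterpreter")')
                   for _ in range(4)]
        for f in futures:
            f.result()  # exec()-style tasks produce no return value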
Help for Extension Module Maintainers

In practice, an extension that implements multi-phase init (PEP 489)
is considered isolated and thus compatible with multiple interpreters.
Otherwise it is "incompatible".

Many extension modules are still incompatible. The maintainers and
users of such extension modules will both benefit when they are
updated to support multiple interpreters. In the meantime, users may
become confused by failures when using multiple interpreters, which
could negatively impact extension maintainers. See Concerns below.

To mitigate that impact and accelerate compatibility, we will do the
following:

- be clear that extension modules are not required to support use in
  multiple interpreters
- raise ImportError when an incompatible module is imported in a
  subinterpreter
- provide resources (e.g. docs) to help maintainers reach
  compatibility
- reach out to the maintainers of Cython and of the most used
  extension modules (on PyPI) to get feedback and possibly provide
  assistance

Examples

Run isolated code in current OS thread

    interp = interpreters.create()
    print('before')
    interp.exec('print("during")')
    print('after')

Run in a different thread

    interp = interpreters.create()
    def run():
        interp.exec('print("during")')
    t = threading.Thread(target=run)
    print('before')
    t.start()
    t.join()
    print('after')

Pre-populate an interpreter

    interp = interpreters.create()
    interp.exec(tw.dedent("""
        import some_lib
        import an_expensive_module
        some_lib.set_up()
        """))
    wait_for_request()
    interp.exec(tw.dedent("""
        some_lib.handle_request()
        """))

Handling an exception

    interp = interpreters.create()
    try:
        interp.exec(tw.dedent("""
            raise KeyError
            """))
    except interpreters.RunFailedError as exc:
        print(f"got the error from the subinterpreter: {exc}")

Re-raising an exception

    interp = interpreters.create()
    try:
        try:
            interp.exec(tw.dedent("""
                raise KeyError
                """))
        except interpreters.RunFailedError as exc:
            raise exc.__cause__
    except KeyError:
        print("got a KeyError from the subinterpreter")

Note that this pattern is a candidate for later improvement.

Interact with the __main__ namespace

    interp = interpreters.create()
    interp.set_main_attrs(a=1, b=2)
    interp.exec(tw.dedent("""
        res = do_something(a, b)
        """))
    res = interp.get_main_attr('res')

Synchronize using an OS pipe

    interp = interpreters.create()
    r1, s1 = os.pipe()
    r2, s2 = os.pipe()

    def task():
        interp.exec(tw.dedent(rf"""
            import os
            os.read({r1}, 1)
            print('during B')
            os.write({s2}, b'\0')
            """))

    t = threading.Thread(target=task)
    t.start()
    print('before')
    os.write(s1, b'\0')  # write one byte to unblock the reader
    print('during A')
    os.read(r2, 1)
    print('after')
    t.join()

Sharing a file descriptor

    interp = interpreters.create()
    with open('spamspamspam') as infile:
        interp.set_main_attrs(fd=infile.fileno())
        interp.exec(tw.dedent(f"""
            import os
            for line in os.fdopen(fd):
                print(line)
            """))

Passing objects via pickle

    interp = interpreters.create()
    r, s = os.pipe()
    interp.exec(tw.dedent(f"""
        import os
        import pickle
        reader = {r}
        """))
    interp.exec(tw.dedent(r"""
        # (assumes the pickle streams contain no NUL bytes)
        data = b''
        c = os.read(reader, 1)
        while c != b'\x00':
            while c != b'\x00':
                data += c
                c = os.read(reader, 1)
            obj = pickle.loads(data)
            do_something(obj)
            data = b''
            c = os.read(reader, 1)
        """))
    for obj in input:
        data = pickle.dumps(obj)
        os.write(s, data)
        os.write(s, b'\x00')
    os.write(s, b'\x00')

Capturing an interpreter's stdout

    interp = interpreters.create()
    stdout = io.StringIO()
    with contextlib.redirect_stdout(stdout):
        interp.exec(tw.dedent("""
            print('spam!')
            """))
    assert(stdout.getvalue() == 'spam!\n')

    # alternately:
    interp.exec(tw.dedent("""
        import contextlib, io
        stdout = io.StringIO()
        with contextlib.redirect_stdout(stdout):
            print('spam!')
        captured = stdout.getvalue()
        """))
    captured = interp.get_main_attr('captured')
    assert(captured == 'spam!\n')

A pipe (os.pipe()) could be used similarly.
Running a module

    interp = interpreters.create()
    main_module = mod_name
    interp.exec(f'import runpy; runpy.run_module({main_module!r})')

Running as script (including zip archives & directories)

    interp = interpreters.create()
    main_script = path_name
    interp.exec(f"import runpy; runpy.run_path({main_script!r})")

Using a channel to communicate

    tasks_recv, tasks = interpreters.create_channel()
    results, results_send = interpreters.create_channel()

    def worker():
        interp = interpreters.create()
        interp.set_main_attrs(tasks=tasks_recv, results=results_send)
        interp.exec(tw.dedent("""
            def handle_request(req):
                ...

            def capture_exception(exc):
                ...

            while True:
                try:
                    req = tasks.recv()
                except Exception:
                    # channel closed
                    break
                try:
                    res = handle_request(req)
                except Exception as exc:
                    res = capture_exception(exc)
                results.send_nowait(res)
            """))
    threads = [threading.Thread(target=worker) for _ in range(20)]
    for t in threads:
        t.start()

    requests = ...
    for req in requests:
        tasks.send(req)
    tasks.close()

    for t in threads:
        t.join()

Sharing a memoryview (imagine map-reduce)

    data, chunksize = read_large_data_set()
    buf = memoryview(data)
    numchunks = (len(buf) + chunksize - 1) // chunksize
    results = memoryview(bytearray(numchunks))  # writable buffer

    tasks_recv, tasks = interpreters.create_channel()

    def worker():
        interp = interpreters.create()
        interp.set_main_attrs(data=buf, results=results, tasks=tasks_recv)
        interp.exec(tw.dedent("""
            while True:
                try:
                    req = tasks.recv()
                except Exception:
                    # channel closed
                    break
                resindex, start, end = req
                chunk = data[start:end]
                res = reduce_chunk(chunk)
                results[resindex] = res
            """))
    t = threading.Thread(target=worker)
    t.start()

    for i in range(numchunks):
        if not workers_running():
            raise ...
        start = i * chunksize
        end = start + chunksize
        if end > len(buf):
            end = len(buf)
        tasks.send((i, start, end))
    tasks.close()
    t.join()

    use_results(results)

Rationale

Running code in multiple interpreters provides a useful level of
isolation within the same process. This can be leveraged in a number
of ways. Furthermore, subinterpreters provide a well-defined framework
in which such isolation may extended. (See PEP 684.)

Alyssa (Nick) Coghlan explained some of the benefits through a
comparison with multi-processing [benefits]:

    [I] expect that communicating between subinterpreters is going
    to end up looking an awful lot like communicating between
    subprocesses via shared memory.

    The trade-off between the two models will then be that one still
    just looks like a single process from the point of view of the
    outside world, and hence doesn't place any extra demands on the
    underlying OS beyond those required to run CPython with a single
    interpreter, while the other gives much stricter isolation
    (including isolating C globals in extension modules), but also
    demands much more from the OS when it comes to its IPC
    capabilities.

    The security risk profiles of the two approaches will also be quite
    different, since using subinterpreters won't require deliberately
    poking holes in the process isolation that operating systems give
    you by default.

CPython has supported multiple interpreters, with increasing levels of
support, since version 1.5.
While the feature has the potential to be a powerful tool, it has
suffered from neglect because the multiple interpreter capabilities
are not readily available directly from Python. Exposing the existing
functionality in the stdlib will help reverse the situation.

This proposal is focused on enabling the fundamental capability of
multiple interpreters, isolated from each other, in the same Python
process. This is a new area for Python so there is relative
uncertainty about the best tools to provide as companions to
interpreters. Thus we minimize the functionality we add in the
proposal as much as possible.

Concerns

- "subinterpreters are not worth the trouble"

Some have argued that subinterpreters do not add sufficient benefit to
justify making them an official part of Python. Adding features to the
language (or stdlib) has a cost in increasing the size of the
language. So an addition must pay for itself.

In this case, multiple interpreter support provides a novel
concurrency model focused on isolated threads of execution.
Furthermore, it provides an opportunity for changes in CPython that
will allow simultaneous use of multiple CPU cores (currently prevented
by the GIL--see PEP 684).

Alternatives to subinterpreters include threading, async, and
multiprocessing. Threading is limited by the GIL and async isn't the
right solution for every problem (nor for every person).
Multiprocessing is likewise valuable in some but not all situations.
Direct IPC (rather than via the multiprocessing module) provides
similar benefits but with the same caveat.

Notably, subinterpreters are not intended as a replacement for any of
the above. Certainly they overlap in some areas, but the benefits of
subinterpreters include isolation and (potentially) performance. In
particular, subinterpreters provide a direct route to an alternate
concurrency model (e.g. CSP) which has found success elsewhere and
will appeal to some Python users. That is the core value that the
interpreters module will provide.

- "stdlib support for multiple interpreters adds extra burden on C
  extension authors"

In the Interpreter Isolation section below we identify ways in which
isolation in CPython's subinterpreters is incomplete. Most notable is
extension modules that use C globals to store internal state. (PEP
3121 and PEP 489 provide a solution to that problem, followed by some
extra APIs that improve efficiency, e.g. PEP 573).

Consequently, projects that publish extension modules may face an
increased maintenance burden as their users start using
subinterpreters, where their modules may break. This situation is
limited to modules that use C globals (or use libraries that use C
globals) to store internal state. For numpy, the reported-bug rate is
one every 6 months. [bug-rate]

Ultimately this comes down to a question of how often it will be a
problem in practice: how many projects would be affected, how often
their users will be affected, what the additional maintenance burden
will be for projects, and what the overall benefit of subinterpreters
is to offset those costs.
The position of this PEP is that the actual extra maintenance burden
will be small and well below the threshold at which subinterpreters
are worth it.

- "creating a new concurrency API deserves much more thought and
  experimentation, so the new module shouldn't go into the stdlib
  right away, if ever"

Introducing an API for a new concurrency model, like happened with
asyncio, is an extremely large project that requires a lot of careful
consideration. It is not something that can be done as simply as this
PEP proposes and likely deserves significant time on PyPI to mature.
(See Nathaniel's post on python-dev.)

However, this PEP does not propose any new concurrency API. At most it
exposes minimal tools (e.g. subinterpreters, channels) which may be
used to write code that follows patterns associated with (relatively)
new-to-Python concurrency models. Those tools could also be used as
the basis for APIs for such concurrency models. Again, this PEP does
not propose any such API.

- "there is no point to exposing subinterpreters if they still share
  the GIL"
- "the effort to make the GIL per-interpreter is disruptive and risky"

A common misconception is that this PEP also includes a promise that
interpreters will no longer share the GIL. When that is clarified, the
next question is "what is the point?". This is already answered at
length in this PEP. Just to be clear, the value lies in:

    * increase exposure of the existing feature, which helps improve
      the code health of the entire CPython runtime
    * expose the (mostly) isolated execution of interpreters
    * preparation for per-interpreter GIL
    * encourage experimentation

- "data sharing can have a negative impact on cache performance in
  multi-core scenarios"

(See [cache-line-ping-pong].)

This shouldn't be a problem for now as we have no immediate plans to
actually share data between interpreters, instead focusing on copying.

About Subinterpreters

Concurrency

Concurrency is a challenging area of software development. Decades of
research and practice have led to a wide variety of concurrency
models, each with different goals. Most center on correctness and
usability.

One class of concurrency models focuses on isolated threads of
execution that interoperate through some message passing scheme. A
notable example is Communicating Sequential Processes [CSP] (upon
which Go's concurrency is roughly based). The intended isolation
inherent to CPython's interpreters makes them well-suited to this
approach.

Shared Data

CPython's interpreters are inherently isolated (with caveats explained
below), in contrast to threads. So the same
communicate-via-shared-memory approach doesn't work. Without an
alternative, effective use of concurrency via multiple interpreters is
significantly limited.

The key challenge here is that sharing objects between interpreters
faces complexity due to various constraints on object ownership,
visibility, and mutability. At a conceptual level it's easier to
reason about concurrency when objects only exist in one interpreter at
a time. At a technical level, CPython's current memory model limits
how Python objects may be shared safely between interpreters;
effectively, objects are bound to the interpreter in which they were
created. Furthermore, the complexity of object sharing increases as
interpreters become more isolated, e.g.
after GIL removal (though this is mitigated somewhat for some
"immortal" objects; see PEP 683).

Consequently, the mechanism for sharing needs to be carefully
considered. There are a number of valid solutions, several of which
may be appropriate to support in Python's stdlib and C-API. Any such
solution is likely to share many characteristics with the others.

In the meantime, we propose here a minimal solution
(Interpreter.set_main_attrs()), which sets some precedent for how
objects are shared. More importantly, it facilitates the introduction
of more advanced approaches later and allows them to coexist and
cooperate. In part to demonstrate that, we will provide a basic
implementation of "channels", as a somewhat more advanced sharing
solution.

Separate proposals may cover:

- the addition of a public C-API based on the implementation of
  Interpreter.set_main_attrs()
- the addition of other sharing approaches to the "interpreters"
  module

The fundamental enabling feature for communication is that most
objects can be converted to some encoding of underlying raw data,
which is safe to be passed between interpreters. For example, an int
object can be turned into a C long value, sent to another interpreter,
and turned back into an int object there. As another example, None may
be passed as-is.

Regardless, the effort to determine the best way forward here is
mostly outside the scope of this PEP. In the meantime, this proposal
describes a basic interim solution using pipes (os.pipe()), as well as
providing a dedicated capability ("channels"). See API For
Communication below.

Interpreter Isolation

CPython's interpreters are intended to be strictly isolated from each
other. Each interpreter has its own copy of all modules, classes,
functions, and variables. The same applies to state in C, including in
extension modules. The CPython C-API docs explain more. [caveats]

However, there are ways in which interpreters do share some state.
First of all, some process-global state remains shared:

- file descriptors
- low-level env vars
- process memory (though allocators are isolated)
- builtin types (e.g. dict, bytes)
- singletons (e.g. None)
- underlying static module data (e.g. functions) for
  builtin/extension/frozen modules

There are no plans to change this.

Second, some isolation is faulty due to bugs or implementations that
did not take subinterpreters into account. This includes things like
extension modules that rely on C globals. [cryptography] In these
cases bugs should be opened (some are already):

- readline module hook functions (http://bugs.python.org/issue4202)
- memory leaks on re-init (http://bugs.python.org/issue21387)

Finally, some potential isolation is missing due to the current design
of CPython. Improvements are currently going on to address gaps in
this area:

- extensions using the PyGILState_* API are somewhat incompatible
  [gilstate]

Existing Usage

Multiple interpreter support has not been a widely used feature. In
fact, there have been only a handful of documented cases of widespread
usage, including mod_wsgi, OpenStack Ceph, and JEP. On the one hand,
these cases provide confidence that existing multiple interpreter
support is relatively stable. On the other hand, there isn't much of a
sample size from which to judge the utility of the feature.

Alternate Python Implementations

I've solicited feedback from various Python implementors about support
for subinterpreters.
Each has indicated that they would be able to support multiple
interpreters in the same process (if they choose to) without a lot of
trouble. Here are the projects I contacted:

- jython ([jython])
- ironpython (personal correspondence)
- pypy (personal correspondence)
- micropython (personal correspondence)

"interpreters" Module API

The module provides the following functions:

    list_all() -> [Interpreter]

        Return a list of all existing interpreters.

    get_current() -> Interpreter

        Return the currently running interpreter.

    get_main() -> Interpreter

        Return the main interpreter. If the Python implementation
        has no concept of a main interpreter then return None.

    create() -> Interpreter

        Initialize a new Python interpreter and return it.
        It will remain idle until something is run in it and always
        run in its own thread.

    is_shareable(obj) -> bool:

        Return True if the object may be "shared" between
        interpreters. This does not necessarily mean that the actual
        objects will be shared. Instead, it means that the objects'
        underlying data will be shared in a cross-interpreter way,
        whether via a proxy, a copy, or some other means.

The module also provides the following class:

    class Interpreter(id):

        id -> int:

            The interpreter's ID. (read-only)

        is_running() -> bool:

            Return whether or not the interpreter's "exec()" is
            currently executing code. Code running in subthreads is
            ignored. Calling this on the current interpreter will
            always return True.

        close():

            Finalize and destroy the interpreter.

            This may not be called on an already running interpreter.
            Doing so results in a RuntimeError.

        set_main_attrs(iterable_or_mapping, /):
        set_main_attrs(**kwargs):

            Set attributes in the interpreter's __main__ module
            corresponding to the given name-value pairs. Each value
            must be a "shareable" object and will be converted to a
            new object (e.g. copy, proxy) in whatever way that
            object's type defines. If an attribute with the same name
            is already set, it will be overwritten.

            This method is helpful for setting up an interpreter
            before calling exec().

        get_main_attr(name, default=None, /):

            Return the value of the corresponding attribute of the
            interpreter's __main__ module. If the attribute isn't set
            then the default is returned. If it is set, but the value
            isn't "shareable" then a ValueError is raised.

            This may be used to introspect the __main__ module, as
            well as a very basic mechanism for "returning" one or more
            results from Interpreter.exec().

        exec(source_str, /):

            Run the provided Python source code in the interpreter,
            in its __main__ module.

            This may not be called on an already running interpreter.
            Doing so results in a RuntimeError.

            An "interp.exec()" call is similar to a builtin exec()
            call (or to calling a function that returns None). Once
            "interp.exec()" completes, the code that called "exec()"
            continues executing (in the original interpreter).
            Likewise, if there is any uncaught exception then it
            effectively (see below) propagates into the code where
            ``interp.exec()`` was called. Like exec() (and threads),
            but unlike function calls, there is no return value.
If any \"return\" value from\n the code is needed, send the data out via a pipe (os.pipe())\n or channel or other cross-interpreter communication mechanism.\n\n The big difference from exec() or functions is that\n \"interp.exec()\" executes the code in an entirely different\n interpreter, with entirely separate state. The interpreters\n are completely isolated from each other, so the state of the\n original interpreter (including the code it was executing in\n the current OS thread) does not affect the state of the target\n interpreter (the one that will execute the code). Likewise,\n the target does not affect the original, nor any of its other\n threads.\n\n Instead, the state of the original interpreter (for this thread)\n is frozen, and the code it's executing code completely blocks.\n At that point, the target interpreter is given control of the\n OS thread. Then, when it finishes executing, the original\n interpreter gets control back and continues executing.\n\n So calling \"interp.exec()\" will effectively cause the current\n Python thread to completely pause. Sometimes you won't want\n that pause, in which case you should make the \"exec()\" call in\n another thread. To do so, add a function that calls\n \"interp.exec()\" and then run that function in a normal\n \"threading.Thread\".\n\n Note that the interpreter's state is never reset, neither\n before \"interp.exec()\" executes the code nor after. Thus the\n interpreter state is preserved between calls to\n \"interp.exec()\". This includes \"sys.modules\", the \"builtins\"\n module, and the internal state of C extension modules.\n\n Also note that \"interp.exec()\" executes in the namespace of the\n \"__main__\" module, just like scripts, the REPL, \"-m\", and\n \"-c\". Just as the interpreter's state is not ever reset, the\n \"__main__\" module is never reset. You can imagine\n concatenating the code from each \"interp.exec()\" call into one\n long script. This is the same as how the REPL operates.\n\n Supported code: source text.\n\nIn addition to the functionality of Interpreter.set_main_attrs(), the\nmodule provides a related way to pass data between interpreters:\nchannels. See Channels below.\n\nUncaught Exceptions\n\nRegarding uncaught exceptions in Interpreter.exec(), we noted that they\nare \"effectively\" propagated into the code where interp.exec() was\ncalled. To prevent leaking exceptions (and tracebacks) between\ninterpreters, we create a surrogate of the exception and its traceback\n(see traceback.TracebackException), set it to __cause__ on a new\ninterpreters.RunFailedError, and raise that.\n\nDirectly raising (a proxy of) the exception is problematic since it's\nharder to distinguish between an error in the interp.exec() call and an\nuncaught exception from the subinterpreter.\n\nInterpreter Restrictions\n\nEvery new interpreter created by interpreters.create() now has specific\nrestrictions on any code it runs. This includes the following:\n\n- importing an extension module fails if it does not implement\n multi-phase init\n- daemon threads may not be created\n- os.fork() is not allowed (so no multiprocessing)\n- os.exec*() is not allowed (but \"fork+exec\", a la subprocess is okay)\n\nNote that interpreters created with the existing C-API do not have these\nrestrictions. The same is true for the \"main\" interpreter, so existing\nuse of Python will not change.\n\nWe may choose to later loosen some of the above restrictions or provide\na way to enable/disable granular restrictions individually. 
Regardless, requiring multi-phase init from extension modules will
always be a default restriction.

API For Communication

As discussed in Shared Data above, multiple interpreter support is
less useful without a mechanism for sharing data (communicating)
between them. Sharing actual Python objects between interpreters,
however, has enough potential problems that we are avoiding support
for that in this proposal. Nor, as mentioned earlier, are we adding
anything more than a basic mechanism for communication.

That mechanism is the Interpreter.set_main_attrs() method. It may be
used to set up global variables before Interpreter.exec() is called.
The name-value pairs passed to set_main_attrs() are bound as
attributes of the interpreter's __main__ module. The values must be
"shareable". See Shareable Types below.

Additional approaches to communicating and sharing objects are enabled
through Interpreter.set_main_attrs(). A shareable object could be
implemented which works like a queue, but with cross-interpreter
safety. In fact, this PEP does include an example of such an approach:
channels.

Shareable Types

An object is "shareable" if its type supports shareable instances. The
type must implement a new internal protocol, which is used to convert
an object to interpreter-independent data and then convert it back to
an object on the other side. Also see is_shareable() above.

A minimal set of simple, immutable builtin types will be supported
initially, including:

- None
- bool
- bytes
- str
- int
- float

We will also support a small number of complex types initially:

- memoryview, to allow sharing PEP 3118 buffers
- channels

Further builtin types may be supported later, complex or not. Limiting
the initial shareable types is a practical matter, reducing the
potential complexity of the initial implementation. There are a number
of strategies we may pursue in the future to expand supported objects,
once we have more experience with interpreter isolation.

In the meantime, a separate proposal will discuss making the internal
protocol (and C-API) used by Interpreter.set_main_attrs() public. With
that protocol, support for other types could be added by extension
modules.
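For example (an illustrative snippet based on the lists above):

    import interpreters  # the proposed module

    # The simple, immutable builtins are shareable...
    for obj in (None, True, b'spam', 'spam', 42, 3.14):
        assert interpreters.is_shareable(obj)

    # ...while arbitrary or mutable objects are not (initially).
    assert not interpreters.is_shareable([1, 2, 3])
    assert not interpreters.is_shareable(object())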
Communicating Through OS Pipes

Even without a dedicated object for communication, users may already
use existing tools. For example, one basic approach for sending data
between interpreters is to use a pipe (see os.pipe()):

1. interpreter A calls os.pipe() to get a read/write pair of file
   descriptors (both int objects)
2. interpreter A calls interp.set_main_attrs(), binding the read FD
   (or embeds it using string formatting)
3. interpreter A calls interp.exec() on interpreter B
4. interpreter A writes some bytes to the write FD
5. interpreter B reads those bytes

Several of the earlier examples demonstrate this, such as Synchronize
using an OS pipe.

Channels

The interpreters module will include a dedicated solution for passing
object data between interpreters: channels. They are included in the
module in part to provide an easier mechanism than using os.pipe() and
in part to demonstrate how libraries may take advantage of
Interpreter.set_main_attrs() and the protocol it uses.

A channel is a simplex FIFO. It is a basic, opt-in data sharing
mechanism that draws inspiration from pipes, queues, and CSP's
channels. [fifo] The main difference from pipes is that channels can
be associated with zero or more interpreters on either end. Like
queues, which are also many-to-many, channels are buffered (though
they also offer methods with unbuffered semantics).

Channels have two operations: send and receive. A key characteristic
of those operations is that channels transmit data derived from Python
objects rather than the objects themselves. When objects are sent,
their data is extracted. When the "object" is received in the other
interpreter, the data is converted back into an object owned by that
interpreter.

To make this work, the mutable shared state will be managed by the
Python runtime, not by any of the interpreters. Initially we will
support only one type of objects for shared state: the channels
provided by interpreters.create_channel(). Channels, in turn, will
carefully manage passing objects between interpreters.

This approach, including keeping the API minimal, helps us avoid
further exposing any underlying complexity to Python users.

The interpreters module provides the following function related to
channels:

    create_channel() -> (RecvChannel, SendChannel):

        Create a new channel and return (recv, send), the RecvChannel
        and SendChannel corresponding to the ends of the channel.

        Both ends of the channel are supported "shared" objects (i.e.
        may be safely shared by different interpreters). Thus they
        may be set using "Interpreter.set_main_attrs()".

The module also provides the following channel-related classes:

    class RecvChannel(id):

        The receiving end of a channel. An interpreter may use this to
        receive objects from another interpreter. Any type supported
        by Interpreter.set_main_attrs() will be supported here, though
        at first only a few of the simple, immutable builtin types
        will be supported.

        id -> int:

            The channel's unique ID. The "send" end has the same one.

        recv(*, timeout=None):

            Return the next object from the channel. If none have been
            sent then wait until the next send (or until the timeout
            is hit).

            At the least, the object will be equivalent to the sent
            object. That will almost always mean the same type with
            the same data, though it could also be a compatible proxy.
            Regardless, it may use a copy of that data or actually
            share the data. That's up to the object's type.

        recv_nowait(default=None):

            Return the next object from the channel. If none have been
            sent then return the default. Otherwise, this is the same
            as the "recv()" method.

    class SendChannel(id):

        The sending end of a channel. An interpreter may use this to
        send objects to another interpreter. Any type supported by
        Interpreter.set_main_attrs() will be supported here, though
        at first only a few of the simple, immutable builtin types
        will be supported.

        id -> int:

            The channel's unique ID. The "recv" end has the same one.

        send(obj, *, timeout=None):

            Send the object (i.e. its data) to the "recv" end of the
            channel. Wait until the object is received. If the object
            is not shareable then ValueError is raised.

            The builtin memoryview is supported, so sending a buffer
            across involves first wrapping the object in a memoryview
            and then sending that.

        send_nowait(obj):

            Send the object to the "recv" end of the channel. This
            behaves the same as "send()", except for the waiting part.
            If no interpreter is currently receiving (waiting on the
            other end) then queue the object and return False.
            Otherwise return True.
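A short sketch tying these pieces together (illustrative; it assumes
only the semantics described above):

    import interpreters  # the proposed module

    recv, send = interpreters.create_channel()

    # No interpreter is waiting on the recv end, so send_nowait()
    # simply queues the object (channels are buffered) and returns
    # False.
    assert send.send_nowait('ping') is False

    # recv_nowait() returns the queued object, or the default when
    # the channel is empty.
    assert recv.recv_nowait() == 'ping'
    assert recv.recv_nowait(default=None) is None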
Caveats For Shared Objects

Again, Python objects are not shared between interpreters. However, in
some cases data those objects wrap is actually shared and not just
copied. One example might be PEP 3118 buffers.

In those cases the object in the original interpreter is kept alive
until the shared data in the other interpreter is no longer used. Then
object destruction can happen like normal in the original interpreter,
along with the previously shared data.

Documentation

The new stdlib docs page for the interpreters module will include the
following:

- (at the top) a clear note that support for multiple interpreters is
  not required from extension modules
- some explanation about what subinterpreters are
- brief examples of how to use multiple interpreters (and
  communicating between them)
- a summary of the limitations of using multiple interpreters
- (for extension maintainers) a link to the resources for ensuring
  multiple interpreters compatibility
- much of the API information in this PEP

Docs about resources for extension maintainers already exist on the
Isolating Extension Modules howto page. Any extra help will be added
there. For example, it may prove helpful to discuss strategies for
dealing with linked libraries that keep their own
subinterpreter-incompatible global state.

Note that the documentation will play a large part in mitigating any
negative impact that the new interpreters module might have on
extension module maintainers.

Also, the ImportError for incompatible extension modules will be
updated to clearly say it is due to missing multiple interpreters
compatibility and that extensions are not required to provide it. This
will help set user expectations properly.

Alternative Solutions

One possible alternative to a new module is to add support for
interpreters to concurrent.futures. There are several reasons why that
wouldn't work:

- the obvious place to look for multiple interpreters support is an
  "interpreters" module, much as with "threading", etc.
- concurrent.futures is all about executing functions but currently we
  don't have a good way to run a function from one interpreter in
  another

Similar reasoning applies for support in the multiprocessing module.

Open Questions

- will it be too confusing that interp.exec() runs in the current
  thread?
- should we add pickling fallbacks right now for interp.exec(), and/or
  Interpreter.set_main_attrs() and Interpreter.get_main_attr()?
- should we support (limited) functions in interp.exec() right now?
- rename Interpreter.close() to Interpreter.destroy()?
- drop Interpreter.get_main_attr(), since we have channels?
- should channels be its own PEP?

Deferred Functionality

In the interest of keeping this proposal minimal, the following
functionality has been left out for future consideration. Note that
this is not a judgement against any of said capability, but rather a
deferment. That said, each is arguably valid.
Add convenience API

There are a number of things I can imagine would smooth out
hypothetical rough edges with the new module:

- add something like Interpreter.run() or Interpreter.call() that
  calls interp.exec() and falls back to pickle
- fall back to pickle in Interpreter.set_main_attrs() and
  Interpreter.get_main_attr()

These would be easy to do if this proves to be a pain point.

Avoid possible confusion about interpreters running in the current thread

One regular point of confusion has been that Interpreter.exec()
executes in the current OS thread, temporarily blocking the current
Python thread. It may be worth doing something to avoid that
confusion.

Some possible solutions for this hypothetical problem:

- by default, run in a new thread?
- add Interpreter.exec_in_thread()?
- add Interpreter.exec_in_current_thread()?

In earlier versions of this PEP the method was interp.run(). The
simple change to interp.exec() alone will probably reduce confusion
sufficiently, when coupled with educating users via the docs. If it
turns out to be a real problem, we can pursue one of the alternatives
at that point.

Clarify "running" vs. "has threads"

Interpreter.is_running() refers specifically to whether or not
Interpreter.exec() (or similar) is running somewhere. It does not say
anything about if the interpreter has any subthreads running. That
information might be helpful.

Some things we could do:

- rename Interpreter.is_running() to Interpreter.is_running_main()
- add Interpreter.has_threads(), to complement
  Interpreter.is_running()
- expand to Interpreter.is_running(main=True, threads=False)

None of these are urgent and any could be done later, if desired.

A Dunder Method For Sharing

We could add a special method, like __xid__ to correspond to tp_xid.
At the very least, it would allow Python types to convert their
instances to some other type that implements tp_xid.

The problem is that exposing this capability to Python code presents a
degree of complexity that hasn't been explored yet, nor is there a
compelling case to investigate that complexity.

Interpreter.call()

It would be convenient to run existing functions in subinterpreters
directly. Interpreter.exec() could be adjusted to support this or a
call() method could be added:

    Interpreter.call(f, *args, **kwargs)

This suffers from the same problem as sharing objects between
interpreters via queues. The minimal solution (running a source
string) is sufficient for us to get the feature out where it can be
explored.

Interpreter.run_in_thread()

This method would make an interp.exec() call for you in a thread.
Doing this using only threading.Thread and interp.exec() is relatively
trivial so we've left it out.
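For illustration, the trivial version is essentially the following
sketch (not proposed API):

    import threading

    def run_in_thread(interp, source):
        # Run interp.exec(source) without blocking the calling thread.
        t = threading.Thread(target=interp.exec, args=(source,))
        t.start()
        return t

    # t = run_in_thread(interp, 'print("hello")')
    # ...do other work...
    # t.join()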
Synchronization Primitives

The threading module provides a number of synchronization primitives
for coordinating concurrent operations. This is especially necessary
due to the shared-state nature of threading. In contrast, interpreters
do not share state. Data sharing is restricted to the runtime's
shareable objects capability, which does away with the need for
explicit synchronization. If any sort of opt-in shared state support
is added to CPython's interpreters in the future, that same effort can
introduce synchronization primitives to meet that need.

CSP Library

A csp module would not be a large step away from the functionality
provided by this PEP. However, adding such a module is outside the
minimalist goals of this proposal.

Syntactic Support

The Go language provides a concurrency model based on CSP, so it's
similar to the concurrency model that multiple interpreters support.
However, Go also provides syntactic support, as well as several
builtin concurrency primitives, to make concurrency a first-class
feature. Conceivably, similar syntactic (and builtin) support could be
added to Python using interpreters. However, that is way outside the
scope of this PEP!

Multiprocessing

The multiprocessing module could support interpreters in the same way
it supports threads and processes. In fact, the module's maintainer,
Davin Potts, has indicated this is a reasonable feature request.
However, it is outside the narrow scope of this PEP.

C-extension opt-in/opt-out

By using the PyModuleDef_Slot introduced by PEP 489, we could easily
add a mechanism by which C-extension modules could opt out of multiple
interpreter support. Then the import machinery, when operating in a
subinterpreter, would need to check the module for support. It would
raise an ImportError if unsupported.

Alternately we could support opting in to multiple interpreters
support. However, that would probably exclude many more modules
(unnecessarily) than the opt-out approach. Also, note that PEP 489
defined that an extension's use of the PEP's machinery implies
multiple interpreters support.

The scope of adding the ModuleDef slot and fixing up the import
machinery is non-trivial, but could be worth it. It all depends on how
many extension modules break under subinterpreters. Given that there
are relatively few cases we know of through mod_wsgi, we can leave
this for later.

Poisoning channels

CSP has the concept of poisoning a channel. Once a channel has been
poisoned, any send() or recv() call on it would raise a special
exception, effectively ending execution in the interpreter that tried
to use the poisoned channel.

This could be accomplished by adding a poison() method to both ends of
the channel. The close() method can be used in this way (mostly), but
these semantics are relatively specialized and can wait.

Resetting __main__

As proposed, every call to Interpreter.exec() will execute in the
namespace of the interpreter's existing __main__ module. This means
that data persists there between interp.exec() calls. Sometimes this
isn't desirable and you want to execute in a fresh __main__. Also, you
don't necessarily want to leak objects there that you aren't using any
more.

Note that the following won't work right because it will clear too
much (e.g. __name__ and the other "__dunder__" attributes):

    interp.exec('globals().clear()')

Possible solutions include:

- a create() arg to indicate resetting __main__ after each
  interp.exec() call
- an Interpreter.reset_main flag to support opting in or out after the
  fact
- an Interpreter.reset_main() method to opt in when desired
- importlib.util.reset_globals() [reset_globals]

Also note that resetting __main__ does nothing about state stored in
other modules. So any solution would have to be clear about the scope
of what is being reset.
Possible solutions include:

- a create() arg to indicate resetting __main__ after each interp.exec() call
- an Interpreter.reset_main flag to support opting in or out after the fact
- an Interpreter.reset_main() method to opt in when desired
- importlib.util.reset_globals() [reset_globals]

Also note that resetting __main__ does nothing about state stored in other modules. So any solution would have to be clear about the scope of what is being reset. Conceivably we could invent a mechanism by which any (or every) module could be reset, unlike reload() which does not clear the module before loading into it.

Regardless, since __main__ is the execution namespace of the interpreter, resetting it has a much more direct correlation to interpreters and their dynamic state than does resetting other modules. So a more generic module reset mechanism may prove unnecessary.

This isn't a critical feature initially. It can wait until later if desirable.

Resetting an interpreter's state

It may be nice to re-use an existing subinterpreter instead of spinning up a new one. Since an interpreter has substantially more state than just the __main__ module, it isn't so easy to put an interpreter back into a pristine/fresh state. In fact, there may be parts of the state that cannot be reset from Python code.

A possible solution is to add an Interpreter.reset() method. This would put the interpreter back into the state it was in when newly created. If called on a running interpreter it would fail (hence the main interpreter could never be reset). This would likely be more efficient than creating a new interpreter, though that depends on what optimizations will be made later to interpreter creation.

While this would potentially provide functionality that is not otherwise available from Python code, it isn't a fundamental functionality. So in the spirit of minimalism here, this can wait. Regardless, I doubt it would be controversial to add it post-PEP.

Copy an existing interpreter's state

Relatedly, it may be useful to support creating a new interpreter based on an existing one, e.g. Interpreter.copy(). This ties into the idea that a snapshot could be made of an interpreter's memory, which would make starting up CPython, or creating new interpreters, faster in general. The same mechanism could be used for a hypothetical Interpreter.reset(), as described previously.

Shareable file descriptors and sockets

Given that file descriptors and sockets are process-global resources, making them shareable is a reasonable idea. They would be a good candidate for the first effort at expanding the supported shareable types. They aren't strictly necessary for the initial API.

Integration with async

Per Antoine Pitrou [async]:

    Has any thought been given to how FIFOs could integrate with async code driven by an event loop (e.g. asyncio)? I think the model of executing several asyncio (or Tornado) applications each in their own subinterpreter may prove quite interesting to reconcile multi-core concurrency with ease of programming. That would require the FIFOs to be able to synchronize on something an event loop can wait on (probably a file descriptor?).

The basic functionality of multiple interpreters support does not depend on async and can be added later.

A possible solution is to provide async implementations of the blocking channel methods (recv() and send()).

Alternately, "readiness callbacks" could be used to simplify use in async scenarios. This would mean adding an optional callback (kw-only) parameter to the recv_nowait() and send_nowait() channel methods. The callback would be called once the object was sent or received (respectively).

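To make the idea concrete, such a readiness callback could be bridged to asyncio roughly as follows; both the callback parameter and its exact semantics are hypothetical here:

    import asyncio

    async def async_recv(rch):
        loop = asyncio.get_running_loop()
        fut = loop.create_future()

        def on_ready(obj):
            # The callback may fire in another thread, so hand the result
            # back to the event loop safely.
            loop.call_soon_threadsafe(fut.set_result, obj)

        rch.recv_nowait(callback=on_ready)  # hypothetical kw-only parameter
        return await fut
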
(Note that making channels buffered makes readiness callbacks less important.)

Support for iteration

Supporting iteration on RecvChannel (via __iter__() or __next__()) may be useful. A trivial implementation would use the recv() method, similar to how files do iteration. Since this isn't a fundamental capability and has a simple analog, adding iteration support can wait until later.

Channel context managers

Context manager support on RecvChannel and SendChannel may be helpful. The implementation would be simple, wrapping a call to close() (or maybe release()) like files do. As with iteration, this can wait.

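In the meantime, both conveniences can be approximated with a user-level wrapper around the proposed recv() and close() methods (ChannelClosedError is the exception the channel API raises once a channel is closed):

    class RecvChannelWrapper:
        def __init__(self, rch):
            self._rch = rch

        def __iter__(self):
            return self

        def __next__(self):
            try:
                return self._rch.recv()
            except ChannelClosedError:
                raise StopIteration

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self._rch.close()
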
Pipes and Queues

With the proposed object passing mechanism of "os.pipe()", other similar basic types aren't strictly required to achieve the minimal useful functionality of multiple interpreters. Such types include pipes (like unbuffered channels, but one-to-one) and queues (like channels, but more generic). See below in Rejected Ideas for more information.

Even though these types aren't part of this proposal, they may still be useful in the context of concurrency. Adding them later is entirely reasonable. They could be trivially implemented as wrappers around channels. Alternatively they could be implemented for efficiency at the same low level as channels.

Return a lock from send()

When sending an object through a channel, you don't have a way of knowing when the object gets received on the other end. One way to work around this is to return a locked threading.Lock from SendChannel.send() that unlocks once the object is received.

Alternately, the proposed SendChannel.send() (blocking) and SendChannel.send_nowait() provide an explicit distinction that is less likely to confuse users.

Note that returning a lock would matter for buffered channels (i.e. queues). For unbuffered channels it is a non-issue.

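Under that idea, usage would look roughly like this (sch is a SendChannel; the return value is the deferred idea above, not part of the proposed API):

    pending = sch.send(obj)  # hypothetical: returns an already-acquired Lock
    # ... do other work ...
    pending.acquire()        # blocks until the other end has received obj
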
Support prioritization in channels

A simple example is queue.PriorityQueue in the stdlib.

Support inheriting settings (and more?)

Folks might find it useful, when creating a new interpreter, to be able to indicate that they would like some things "inherited" by the new interpreter. The mechanism could be a strict copy or it could be copy-on-write. The motivating example is with the warnings module (e.g. copy the filters).

The feature isn't critical, nor would it be widely useful, so it can wait until there's interest. Notably, both suggested solutions will require significant work, especially when it comes to complex objects and most especially for mutable containers of mutable complex objects.

Make exceptions shareable

Exceptions are propagated out of run() calls, so it isn't a big leap to make them shareable. However, as noted elsewhere, it isn't essential (or particularly common), so we can wait on doing that.

Make everything shareable through serialization

We could use pickle (or marshal) to serialize everything and thus make them shareable. Doing this is potentially inefficient, but it may be a matter of convenience in the end. We can add it later, but trying to remove it later would be significantly more painful.

Make RunFailedError.__cause__ lazy

An uncaught exception in a subinterpreter (from interp.exec()) is copied to the calling interpreter and set as __cause__ on a RunFailedError which is then raised. That copying part involves some sort of deserialization in the calling interpreter, which can be expensive (e.g. due to imports) yet is not always necessary.

So it may be useful to use an ExceptionProxy type to wrap the serialized exception and only deserialize it when needed. That could be via ExceptionProxy.__getattribute__() or perhaps through RunFailedError.resolve() (which would raise the deserialized exception and set RunFailedError.__cause__ to the exception).

It may also make sense to have RunFailedError.__cause__ be a descriptor that does the lazy deserialization (and sets __cause__) on the RunFailedError instance.

Return a value from interp.exec()

Currently interp.exec() always returns None. One idea is to return the return value from whatever the subinterpreter ran. However, for now it doesn't make sense. The only thing folks can run is a string of code (i.e. a script). This is equivalent to PyRun_StringFlags(), exec(), or a module body. None of those "return" anything. We can revisit this once interp.exec() supports functions, etc.

Add a shareable synchronization primitive

This would be _threading.Lock (or something like it) where interpreters would actually share the underlying mutex. The main concern is that locks and isolated interpreters may not mix well (as learned in Go).

We can add this later if it proves desirable without much trouble.

Propagate SystemExit and KeyboardInterrupt Differently

The exception types that inherit from BaseException (aside from Exception) are usually treated specially. These types are: KeyboardInterrupt, SystemExit, and GeneratorExit. It may make sense to treat them specially when it comes to propagation from interp.exec(). Here are some options:

    * propagate like normal via RunFailedError
    * do not propagate (handle them somehow in the subinterpreter)
    * propagate them directly (avoid RunFailedError)
    * propagate them directly (set RunFailedError as __cause__)

We aren't going to worry about handling them differently. Threads already ignore SystemExit, so for now we will follow that pattern.

Add an explicit release() and close() to channel end classes

It can be convenient to have an explicit way to close a channel against further global use. Likewise it could be useful to have an explicit way to release one of the channel ends relative to the current interpreter. Among other reasons, such a mechanism is useful for communicating overall state between interpreters without the extra boilerplate that passing objects through a channel directly would require.

The challenge is getting automatic release/close right without making it hard to understand. This is especially true when dealing with a non-empty channel. We should be able to get by without release/close for now.

Add SendChannel.send_buffer()

This method would allow no-copy sending of an object through a channel if it supports the PEP 3118 buffer protocol (e.g. memoryview).

Support for this is not fundamental to channels and can be added later without much disruption.

Auto-run in a thread

The PEP proposes a hard separation between subinterpreters and threads: if you want to run in a thread you must create the thread yourself and call interp.exec() in it. However, it might be convenient if interp.exec() could do that for you, meaning there would be less boilerplate.

Furthermore, we anticipate that users will want to run in a thread much more often than not. So it would make sense to make this the default behavior. We would add a kw-only param "threaded" (default True) to interp.exec() to allow the run-in-the-current-thread operation.

Rejected Ideas

Explicit channel association

Interpreters are implicitly associated with channels upon recv() and send() calls. They are de-associated with release() calls. The alternative would be explicit methods. It would be either add_channel() and remove_channel() methods on Interpreter objects or something similar on channel objects.

In practice, this level of management shouldn't be necessary for users. So adding more explicit support would only add clutter to the API.

Add an API based on pipes

A pipe would be a simplex FIFO between exactly two interpreters. For most use cases this would be sufficient. It could potentially simplify the implementation as well. However, it isn't a big step to supporting a many-to-many simplex FIFO via channels. Also, with pipes the API ends up being slightly more complicated, requiring naming the pipes.

Add an API based on queues

Queues and buffered channels are almost the same thing. The main difference is that channels have a stronger relationship with context (i.e. the associated interpreter).

The name "Channel" was used instead of "Queue" to avoid confusion with the stdlib queue.Queue.

"enumerate"

The list_all() function provides the list of all interpreters. In the threading module, which partly inspired the proposed API, the function is called enumerate(). The name is different here to avoid confusing Python users that are not already familiar with the threading API. For them "enumerate" is rather unclear, whereas "list_all" is clear.

Alternate solutions to prevent leaking exceptions across interpreters

In function calls, uncaught exceptions propagate to the calling frame. The same approach could be taken with interp.exec(). However, this would mean that exception objects would leak across the inter-interpreter boundary. Likewise, the frames in the traceback would potentially leak.

While that might not be a problem currently, it would be a problem once interpreters get better isolation relative to memory management (which is necessary to stop sharing the GIL between interpreters). We've resolved the semantics of how the exceptions propagate by raising a RunFailedError instead, for which __cause__ wraps a safe proxy for the original exception and traceback.

Rejected possible solutions:

- reproduce the exception and traceback in the original interpreter and raise that.
- raise a subclass of RunFailedError that proxies the original exception and traceback.
- raise RuntimeError instead of RunFailedError
- convert at the boundary (a la subprocess.CalledProcessError) (requires a cross-interpreter representation)
- support customization via Interpreter.excepthook (requires a cross-interpreter representation)
- wrap in a proxy at the boundary (including with support for something like err.raise() to propagate the traceback).
- return the exception (or its proxy) from interp.exec() instead of raising it
- return a result object (like subprocess does) [result-object] (unnecessary complexity?)
- throw the exception away and expect users to deal with unhandled exceptions explicitly in the script they pass to interp.exec() (they can pass error info out via channels); with threads you have to do something similar

Always associate each new interpreter with its own thread

As implemented in the C-API, an interpreter is not inherently tied to any thread. Furthermore, it will run in any existing thread, whether created by Python or not. You only have to activate one of its thread states (PyThreadState) in the thread first.
This means that the same thread may run more than one interpreter (though obviously not at the same time).

The proposed module maintains this behavior. Interpreters are not tied to threads. Only calls to Interpreter.exec() are. However, one of the key objectives of this PEP is to provide a more human-centric concurrency model. With that in mind, from a conceptual standpoint the module might be easier to understand if each interpreter were associated with its own thread.

That would mean interpreters.create() would create a new thread and Interpreter.exec() would only execute in that thread (and nothing else would). The benefit is that users would not have to wrap Interpreter.exec() calls in a new threading.Thread. Nor would they be in a position to accidentally pause the current interpreter (in the current thread) while their interpreter executes.

The idea is rejected because the benefit is small and the cost is high. The difference from the capability in the C-API would be potentially confusing. The implicit creation of threads is magical. The early creation of threads is potentially wasteful. The inability to run arbitrary interpreters in an existing thread would prevent some valid use cases, frustrating users. Tying interpreters to threads would require extra runtime modifications. It would also make the module's implementation overly complicated. Finally, it might not even make the module easier to understand.

Only associate interpreters upon use

Associate interpreters with channel ends only once recv(), send(), etc. are called.

Doing this is potentially confusing and also can lead to unexpected races where a channel is auto-closed before it can be used in the original (creating) interpreter.

Allow multiple simultaneous calls to Interpreter.exec()

This would make sense especially if Interpreter.exec() were to manage new threads for you (which we've rejected). Essentially, each call would run independently, which would be mostly fine from a narrow technical standpoint, since each interpreter can have multiple threads.

The problem is that the interpreter has only one __main__ module and simultaneous Interpreter.exec() calls would have to sort out sharing __main__ or we'd have to invent a new mechanism. Neither would be simple enough to be worth doing.

Add a "reraise" method to RunFailedError

While having __cause__ set on RunFailedError helps produce a more useful traceback, it's less helpful when handling the original error. To help facilitate this, we could add RunFailedError.reraise(). This method would enable the following pattern:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            exc.reraise()
    except MyException:
        ...

This would be made even simpler if there existed a __reraise__ protocol.

All that said, this is completely unnecessary.
Using __cause__ is good enough:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            raise exc.__cause__
    except MyException:
        ...

Note that in extreme cases it may require a little extra boilerplate:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            if exc.__cause__ is not None:
                raise exc.__cause__
            raise  # re-raise
    except MyException:
        ...

Implementation

The implementation of the PEP has 4 parts:

- the high-level module described in this PEP (mostly a light wrapper around a low-level C extension)
- the low-level C extension module
- additions to the internal C-API needed by the low-level module
- secondary fixes/changes in the CPython runtime that facilitate the low-level module (among other benefits)

These are at various levels of completion, with more done the lower you go:

- the high-level module has been, at best, roughly implemented. However, fully implementing it will be almost trivial.
- the low-level module is mostly complete. The bulk of the implementation was merged into master in December 2018 as the "_xxsubinterpreters" module (for the sake of testing multiple interpreters functionality). Only the exception propagation implementation remains to be finished, which will not require extensive work.
- all necessary C-API work has been finished
- all anticipated work in the runtime has been finished

The implementation effort for PEP 554 is being tracked as part of a larger project aimed at improving multi-core support in CPython. [multi-core-project]

References

- mp-conn
  https://docs.python.org/3/library/multiprocessing.html#connection-objects

- main-thread
  https://mail.python.org/pipermail/python-ideas/2017-September/047144.html
  https://mail.python.org/pipermail/python-dev/2017-September/149566.html

- petr-c-ext
  https://mail.python.org/pipermail/import-sig/2016-June/001062.html
  https://mail.python.org/pipermail/python-ideas/2016-April/039748.html

Copyright

This document has been placed in the public domain.

CSP
    https://en.wikipedia.org/wiki/Communicating_sequential_processes
    https://github.com/futurecore/python-csp

async
    https://mail.python.org/pipermail/python-dev/2017-September/149420.html
    https://mail.python.org/pipermail/python-dev/2017-September/149585.html

benefits
    https://mail.python.org/pipermail/python-ideas/2017-September/047122.html

bug-rate
    https://mail.python.org/pipermail/python-ideas/2017-September/047094.html

c-api
    https://docs.python.org/3/c-api/init.html#sub-interpreter-support

cache-line-ping-pong
    https://mail.python.org/archives/list/python-dev@python.org/message/3HVRFWHDMWPNR367GXBILZ4JJAUQ2STZ/

caveats
    https://docs.python.org/3/c-api/init.html#bugs-and-caveats

cryptography
    https://github.com/pyca/cryptography/issues/2299

fifo
    https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Pipe
    https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue
    https://docs.python.org/3/library/queue.html#module-queue
    http://stackless.readthedocs.io/en/2.7-slp/library/stackless/channels.html
    https://golang.org/doc/effective_go.html#sharing
    http://www.jtolds.com/writing/2016/03/go-channels-are-bad-and-you-should-feel-bad/

gilstate
    https://bugs.python.org/issue10915
    http://bugs.python.org/issue15751

jython
    https://mail.python.org/pipermail/python-ideas/2017-May/045771.html

multi-core-project
    https://github.com/ericsnowcurrently/multi-core-python

reset_globals
    https://mail.python.org/pipermail/python-dev/2017-September/149545.html

result-object
    https://mail.python.org/pipermail/python-dev/2017-September/149562.html

PEP: 692
Title: Using TypedDict for more precise **kwargs typing
Author: Franek Magiera <framagie@gmail.com>
Sponsor: Jelle Zijlstra <jelle.zijlstra@gmail.com>
Discussions-To: https://discuss.python.org/t/pep-692-using-typeddict-for-more-precise-kwargs-typing/17314
Status: Final
Type: Standards Track
Topic: Typing
Created: 29-May-2022
Python-Version: 3.12
Post-History: 29-May-2022, 12-Jul-2022, 12-Jul-2022
Resolution: https://discuss.python.org/t/pep-692-using-typeddict-for-more-precise-kwargs-typing/17314/81

typing:unpack-kwargs

Abstract

Currently **kwargs can be type hinted as long as all of the keyword arguments specified by them are of the same type. However, that behaviour can be very limiting. Therefore, in this PEP we propose a new way to enable more precise **kwargs typing. The new approach revolves around using TypedDict to type **kwargs that comprise keyword arguments of different types.

Motivation

Currently annotating **kwargs with a type T means that the kwargs type is in fact dict[str, T]. For example:

    def foo(**kwargs: str) -> None: ...

means that all keyword arguments in foo are strings (i.e., kwargs is of type dict[str, str]). This behaviour limits the ability to type annotate **kwargs only to the cases where all of them are of the same type. However, it is often the case that keyword arguments conveyed by **kwargs have different types that are dependent on the keyword's name. In those cases type annotating **kwargs is not possible. This is especially a problem for already existing codebases where the need of refactoring the code in order to introduce proper type annotations may be considered not worth the effort. This in turn prevents the project from getting all of the benefits that type hinting can provide.

Moreover, **kwargs can be used to reduce the amount of code needed in cases when there is a top-level function that is a part of a public API and it calls a bunch of helper functions, all of which expect the same keyword arguments. Unfortunately, if those helper functions were to use **kwargs, there is no way to properly type hint them if the keyword arguments they expect are of different types. In addition, even if the keyword arguments are of the same type, there is no way to check whether the function is being called with keyword names that it actually expects.

As described in the Intended Usage section, using **kwargs is not always the best tool for the job. Despite that, it is still a widely used pattern. As a consequence, there has been a lot of discussion around supporting more precise **kwargs typing and it became a feature that would be valuable for a large part of the Python community. This is best illustrated by the mypy GitHub issue 4441 which contains a lot of real world cases that could benefit from this proposal.

One more use case worth mentioning for which **kwargs are also convenient is when a function should accommodate optional keyword-only arguments that don't have default values. A need for a pattern like that can arise when values that are usually used as defaults to indicate no user input, such as None, can be passed in by a user and should result in a valid, non-default behavior. For example, this issue came up in the popular httpx library.

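That pattern can be sketched with the machinery this PEP proposes; the RequestOptions name and the timeout handling below are invented purely for illustration:

    from typing import NotRequired, TypedDict, Unpack  # Python 3.11+

    class RequestOptions(TypedDict):
        timeout: NotRequired[float | None]

    def request(url: str, **kwargs: Unpack[RequestOptions]) -> None:
        if "timeout" in kwargs:
            # The caller gave input -- possibly an explicit, meaningful None.
            timeout = kwargs["timeout"]
        else:
            # The caller gave no input at all; use the library's behavior.
            timeout = 5.0
        ...
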
Rationale

PEP 589 introduced the TypedDict type constructor that supports dictionary types consisting of string keys and values of potentially different types. A function's keyword arguments represented by a formal parameter that begins with double asterisk, such as **kwargs, are received as a dictionary. Additionally, such functions are often called using unpacked dictionaries to provide keyword arguments. This makes TypedDict a perfect candidate to be used for more precise **kwargs typing. In addition, with TypedDict keyword names can be taken into account during static type analysis. However, specifying **kwargs type with a TypedDict means, as mentioned earlier, that each keyword argument specified by **kwargs is a TypedDict itself. For instance:

    class Movie(TypedDict):
        name: str
        year: int

    def foo(**kwargs: Movie) -> None: ...

means that each keyword argument in foo is itself a Movie dictionary that has a name key with a string type value and a year key with an integer type value. Therefore, in order to support specifying kwargs type as a TypedDict without breaking current behaviour, a new construct has to be introduced.

To support this use case, we propose reusing Unpack which was initially introduced in PEP 646. There are several reasons for doing so:

- Its name is quite suitable and intuitive for the **kwargs typing use case as our intention is to "unpack" the keyword arguments from the supplied TypedDict.
- The current way of typing *args would be extended to **kwargs and those are supposed to behave similarly.
- There would be no need to introduce any new special forms.
- The use of Unpack for the purposes described in this PEP does not interfere with the use cases described in PEP 646.

Specification

With Unpack we introduce a new way of annotating **kwargs. Continuing the previous example:

    def foo(**kwargs: Unpack[Movie]) -> None: ...

would mean that the **kwargs comprise two keyword arguments specified by Movie (i.e. a name keyword of type str and a year keyword of type int). This indicates that the function should be called as follows:

    kwargs: Movie = {"name": "Life of Brian", "year": 1979}

    foo(**kwargs)                               # OK!
    foo(name="The Meaning of Life", year=1983)  # OK!

When Unpack is used, type checkers treat kwargs inside the function body as a TypedDict:

    def foo(**kwargs: Unpack[Movie]) -> None:
        assert_type(kwargs, Movie)  # OK!

Using the new annotation will not have any runtime effect - it should only be taken into account by type checkers. Any mention of errors in the following sections relates to type checker errors.

Function calls with standard dictionaries

Passing a dictionary of type dict[str, object] as a **kwargs argument to a function that has **kwargs annotated with Unpack must generate a type checker error.
On the other hand, the behaviour for functions using standard, untyped dictionaries can depend on the type checker. For example:

    def foo(**kwargs: Unpack[Movie]) -> None: ...

    movie: dict[str, object] = {"name": "Life of Brian", "year": 1979}
    foo(**movie)  # WRONG! Movie is of type dict[str, object]

    typed_movie: Movie = {"name": "The Meaning of Life", "year": 1983}
    foo(**typed_movie)  # OK!

    another_movie = {"name": "Life of Brian", "year": 1979}
    foo(**another_movie)  # Depends on the type checker.

Keyword collisions

A TypedDict that is used to type **kwargs could potentially contain keys that are already defined in the function's signature. If the duplicate name is a standard parameter, an error should be reported by type checkers. If the duplicate name is a positional-only parameter, no errors should be generated. For example:

    def foo(name, **kwargs: Unpack[Movie]) -> None: ...     # WRONG! "name" will
                                                            # always bind to the
                                                            # first parameter.

    def foo(name, /, **kwargs: Unpack[Movie]) -> None: ...  # OK! "name" is a
                                                            # positional-only
                                                            # parameter, so
                                                            # **kwargs can
                                                            # contain a "name"
                                                            # keyword.

Required and non-required keys

By default all keys in a TypedDict are required. This behaviour can be overridden by setting the dictionary's total parameter as False. Moreover, PEP 655 introduced new type qualifiers - typing.Required and typing.NotRequired - that enable specifying whether a particular key is required or not:

    class Movie(TypedDict):
        title: str
        year: NotRequired[int]

When using a TypedDict to type **kwargs all of the required and non-required keys should correspond to required and non-required function keyword parameters. Therefore, if a required key is not supported by the caller, then an error must be reported by type checkers.

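For example, continuing the Movie definition above, omitting the non-required year key is fine, while omitting the required title key is an error:

    def foo(**kwargs: Unpack[Movie]) -> None: ...

    foo(title="Life of Brian")             # OK! "year" is not required.
    foo(title="Life of Brian", year=1979)  # OK!
    foo(year=1979)                         # WRONG! "title" is required.
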
Assignment

Assignments of a function typed with **kwargs: Unpack[Movie] and another callable type should pass type checking only if they are compatible. This can happen for the scenarios described below.

Source and destination contain **kwargs

Both destination and source functions have a **kwargs: Unpack[TypedDict] parameter and the destination function's TypedDict is assignable to the source function's TypedDict and the rest of the parameters are compatible:

    class Animal(TypedDict):
        name: str

    class Dog(Animal):
        breed: str

    def accept_animal(**kwargs: Unpack[Animal]): ...
    def accept_dog(**kwargs: Unpack[Dog]): ...

    accept_dog = accept_animal  # OK! Expression of type Dog can be
                                # assigned to a variable of type Animal.

    accept_animal = accept_dog  # WRONG! Expression of type Animal
                                # cannot be assigned to a variable of type Dog.

Source contains **kwargs and destination doesn't

The destination callable doesn't contain **kwargs, the source callable contains **kwargs: Unpack[TypedDict] and the destination function's keyword arguments are assignable to the corresponding keys in source function's TypedDict. Moreover, not required keys should correspond to optional function arguments, whereas required keys should correspond to required function arguments. Again, the rest of the parameters have to be compatible. Continuing the previous example:

    class Example(TypedDict):
        animal: Animal
        string: str
        number: NotRequired[int]

    def src(**kwargs: Unpack[Example]): ...
    def dest(*, animal: Dog, string: str, number: int = ...): ...

    dest = src  # OK!

It is worth pointing out that the destination function's parameters that are to be compatible with the keys and values from the TypedDict must be keyword only:

    def dest(dog: Dog, string: str, number: int = ...): ...

    dog: Dog = {"name": "Daisy", "breed": "labrador"}

    dest(dog, "some string")  # OK!

    dest = src                # Type checker error!
    dest(dog, "some string")  # The same call fails at
                              # runtime now because 'src' expects
                              # keyword arguments.

The reverse situation where the destination callable contains **kwargs: Unpack[TypedDict] and the source callable doesn't contain **kwargs should be disallowed. This is because we cannot be sure that additional keyword arguments are not being passed in when an instance of a subclass had been assigned to a variable with a base class type and then unpacked in the destination callable invocation:

    def dest(**kwargs: Unpack[Animal]): ...
    def src(name: str): ...

    dog: Dog = {"name": "Daisy", "breed": "Labrador"}
    animal: Animal = dog

    dest = src      # WRONG!
    dest(**animal)  # Fails at runtime.

A similar situation can happen even without inheritance as compatibility between TypedDicts is based on structural subtyping.

Source contains untyped **kwargs

The destination callable contains **kwargs: Unpack[TypedDict] and the source callable contains untyped **kwargs:

    def src(**kwargs): ...
    def dest(**kwargs: Unpack[Movie]): ...

    dest = src  # OK!

Source contains traditionally typed **kwargs: T

The destination callable contains **kwargs: Unpack[TypedDict], the source callable contains traditionally typed **kwargs: T and each of the destination function TypedDict's fields is assignable to a variable of type T:

    class Vehicle:
        ...

    class Car(Vehicle):
        ...

    class Motorcycle(Vehicle):
        ...

    class Vehicles(TypedDict):
        car: Car
        moto: Motorcycle

    def dest(**kwargs: Unpack[Vehicles]): ...
    def src(**kwargs: Vehicle): ...

    dest = src  # OK!

On the other hand, if the destination callable contains either untyped or traditionally typed **kwargs: T and the source callable is typed using **kwargs: Unpack[TypedDict] then an error should be generated, because traditionally typed **kwargs aren't checked for keyword names.

To summarize, function parameters should behave contravariantly and function return types should behave covariantly.

Passing kwargs inside a function to another function

A previous point mentions the problem of possibly passing additional keyword arguments by assigning a subclass instance to a variable that has a base class type. Let's consider the following example:

    class Animal(TypedDict):
        name: str

    class Dog(Animal):
        breed: str

    def takes_name(name: str): ...

    dog: Dog = {"name": "Daisy", "breed": "Labrador"}
    animal: Animal = dog

    def foo(**kwargs: Unpack[Animal]):
        print(kwargs["name"].capitalize())

    def bar(**kwargs: Unpack[Animal]):
        takes_name(**kwargs)

    def baz(animal: Animal):
        takes_name(**animal)

    def spam(**kwargs: Unpack[Animal]):
        baz(kwargs)

    foo(**animal)   # OK! foo only expects and uses keywords of 'Animal'.

    bar(**animal)   # WRONG! This will fail at runtime because 'breed' keyword
                    # will be passed to 'takes_name' as well.

    spam(**animal)  # WRONG! Again, 'breed' keyword will be eventually passed
                    # to 'takes_name'.

In the example above, the call to foo will not cause any issues at runtime. Even though foo expects kwargs of type Animal it doesn't matter if it receives additional arguments because it only reads and uses what it needs, completely ignoring any additional values.

The calls to bar and spam will fail because an unexpected keyword argument will be passed to the takes_name function.

Therefore, kwargs hinted with an unpacked TypedDict can only be passed to another function if the function to which unpacked kwargs are being passed has **kwargs in its signature as well, because then additional keywords would not cause errors at runtime during function invocation. Otherwise, the type checker should generate an error.

In cases similar to the bar function above the problem could be worked around by explicitly dereferencing desired fields and using them as arguments to perform the function call:

    def bar(**kwargs: Unpack[Animal]):
        name = kwargs["name"]
        takes_name(name)

Using Unpack with types other than TypedDict

As described in the Rationale section, TypedDict is the most natural candidate for typing **kwargs. Therefore, in the context of typing **kwargs, using Unpack with types other than TypedDict should not be allowed and type checkers should generate errors in such cases.

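For instance (an illustrative sketch):

    def foo(**kwargs: Unpack[Movie]) -> None: ...  # OK! Movie is a TypedDict.

    def bar(**kwargs: Unpack[tuple[str, int]]) -> None: ...  # WRONG! Not a
                                                             # TypedDict.
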
Changes to Unpack

Currently using Unpack in the context of typing is interchangeable with using the asterisk syntax:

    >>> Unpack[Movie]
    *

Therefore, in order to be compatible with the new use case, Unpack's repr should be changed to simply Unpack[T].

Intended Usage

The intended use cases for this proposal are described in the Motivation section. In summary, more precise **kwargs typing can bring benefits to already existing codebases that decided to use **kwargs initially, but now are mature enough to use a stricter contract via type hints. Using **kwargs can also help in reducing code duplication and the amount of copy-pasting needed when there is a bunch of functions that require the same set of keyword arguments. Finally, **kwargs are useful for cases when a function needs to facilitate optional keyword arguments that don't have obvious default values.

However, it has to be pointed out that in some cases there are better tools for the job than using TypedDict to type **kwargs as proposed in this PEP. For example, when writing new code if all the keyword arguments are required or have default values then writing everything explicitly is better than using **kwargs and a TypedDict:

    def foo(name: str, year: int): ...     # Preferred way.
    def foo(**kwargs: Unpack[Movie]): ...

Similarly, when type hinting third party libraries via stubs it is again better to state the function signature explicitly - this is the only way to type such a function if it has default arguments. Another issue that may arise in this case when trying to type hint the function with a TypedDict is that some standard function parameters may be treated as keyword only:

    def foo(name, year): ...               # Function in a third party library.

    def foo(**kwargs: Unpack[Movie]): ...  # Function signature in a stub file.

    foo("Life of Brian", 1979)             # This would be now failing type
                                           # checking but is fine.

    foo(name="Life of Brian", year=1979)   # This would be the only way to call
                                           # the function now that passes type
                                           # checking.

Therefore, in this case it is again preferred to type hint such function explicitly as:

    def foo(name: str, year: int): ...

Also, for the benefit of IDEs and documentation pages, functions that are part of the public API should prefer explicit keyword parameters whenever possible.

How to Teach This

This PEP could be linked in the typing module's documentation. Moreover, a new section on using Unpack could be added to the aforementioned docs. Similar sections could be also added to the mypy documentation and the typing RTD documentation.

Reference Implementation

The mypy type checker already supports more precise **kwargs typing using Unpack.

The Pyright type checker also provides provisional support for this feature.

Rejected Ideas

TypedDict unions

It is possible to create unions of typed dictionaries. However, supporting typing **kwargs with a union of typed dicts would greatly increase the complexity of the implementation of this PEP and there seems to be no compelling use case to justify the support for this. Therefore, using unions of typed dictionaries to type **kwargs as described in the context of this PEP can result in an error:

    class Book(TypedDict):
        genre: str
        pages: int

    TypedDictUnion = Movie | Book

    def foo(**kwargs: Unpack[TypedDictUnion]) -> None: ...  # WRONG! Unsupported
                                                            # use of a union of
                                                            # TypedDicts to type
                                                            # **kwargs

Instead, a function that expects a union of TypedDicts can be overloaded:

    @overload
    def foo(**kwargs: Unpack[Movie]): ...

    @overload
    def foo(**kwargs: Unpack[Book]): ...

Changing the meaning of **kwargs annotations

One way to achieve the purpose of this PEP would be to change the meaning of **kwargs annotations, so that the annotations would apply to the entire **kwargs dict, not to individual elements. For consistency, we would have to make an analogous change to *args annotations.

This idea was discussed in a meeting of the typing community, and the consensus was that the change would not be worth the cost. There is no clear migration path, the current meaning of *args and **kwargs annotations is well-established in the ecosystem, and type checkers would have to introduce new errors for code that is currently legal.

Introducing a new syntax

In the previous versions of this PEP, using a double asterisk syntax was proposed to support more precise **kwargs typing. Using this syntax, functions could be annotated as follows:

    def foo(**kwargs: **Movie): ...

Which would have the same meaning as:

    def foo(**kwargs: Unpack[Movie]): ...

This greatly increased the scope of the PEP, as it would require a grammar change and adding a new dunder for the Unpack special form. At the same time, the justification for introducing a new syntax was not strong enough and became a blocker for the whole PEP.
Therefore, we decided to abandon the idea of introducing a new syntax as a part of this PEP and may propose it again in a separate one.

References

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

PEP: 8104
Title: 2023 Term Steering Council election
Author: Ee Durbin <ee@python.org>
Sponsor: Brett Cannon <brett@python.org>
Status: Final
Type: Informational
Topic: Governance
Created: 08-Nov-2022

Abstract

This document describes the schedule and other details of the December 2022 election for the Python steering council, as specified in PEP 13. This is the steering council election for the 2023 term (i.e. Python 3.12).

Election Administration

The steering council appointed the Python Software Foundation Director of Infrastructure, Ee Durbin, to administer the election.

Schedule

There will be a two-week nomination period, followed by a two-week vote.

The nomination period was: November 14, 2022 through November 28, 2022 AoE[1].

The voting period was: December 1, 2022 through December 14, 2022 AoE[2].

Candidates

Candidates must be nominated by a core team member. If the candidate is a core team member, they may nominate themselves.

Nominees (in alphabetical order):

- Brett Cannon
- Emily Morehouse
- Dong-hee Na
- Pablo Galindo Salgado
- Gregory P. Smith
- Victor Stinner
- Petr Viktorin
- Thomas Wouters

Withdrawn nominations:

- None

Voter Roll

All active Python core team members are eligible to vote. Active status is determined as described in PEP 13 and implemented via the software at python/voters [3].

Ballots will be distributed based on the Python Voter Roll[4] for this election.

While this file is not public as it contains private email addresses, the Complete Voter Roll by name will be made available when the roll is created.

Election Implementation

The election will be conducted using the Helios Voting Service.

Configuration

Short name: 2023-python-steering-council

Name: 2023 Python Steering Council Election

Description: Election for the Python steering council, as specified in PEP 13. This is the steering council election for the 2023 term.

type: Election

Use voter aliases: [X]

Randomize answer order: [X]

Private: [X]

Help Email Address: psf-election@python.org

Voting starts at: December 1, 2022 12:00 UTC

Voting ends at: December 15, 2022 12:00 UTC

This will create an election in which:

- Voting is not open to the public, only those on the Voter Roll may participate. Ballots will be emailed when voting starts.
- Candidates are presented in random order, to help avoid bias.
- Voter identities and ballots are protected against cryptographic advances.

Questions

Question 1

Select between 0 and - (approval) answers.
Result Type: absolute

Question: Select candidates for the Python Steering Council

Answer #1 - #N: Candidates from the Candidates section

Results

Of 85 eligible voters, 66 cast ballots.

The top five vote-getters are:

- Pablo Galindo Salgado
- Gregory P. Smith
- Emily Morehouse
- Brett Cannon
- Thomas Wouters

No conflicts of interest as defined in PEP 13 were observed.

The full vote counts are as follows:

    ----------------------------------------
    Candidate                Votes Received
    -----------------------  ---------------
    Pablo Galindo Salgado    61
    Gregory P. Smith         48
    Emily Morehouse          47
    Brett Cannon             42
    Thomas Wouters           39
    Petr Viktorin            36
    Victor Stinner           34
    Dong-hee Na              29
    ----------------------------------------

Copyright

This document has been placed in the public domain.

Complete Voter Roll

Active Python core developers

    Alex Gaynor
    Alex Waygood
    Ammar Askar
    Andrew Svetlov
    Antoine Pitrou
    Barry Warsaw
    Batuhan Taskaya
    Benjamin Peterson
    Berker Peksağ
    Brandt Bucher
    Brett Cannon
    Brian Curtin
    Brian Quinlan
    Carol Willing
    Cheryl Sabella
    Chris Jerdonek
    Chris Withers
    Christian Heimes
    Dennis Sweeney
    Dino Viehland
    Dong-hee Na
    Emily Morehouse
    Éric Araujo
    Eric Snow
    Eric V. Smith
    Erlend Egeberg Aasland
    Ethan Furman
    Ezio Melotti
    Facundo Batista
    Filipe Laíns
    Fred Drake
    Georg Brandl
    Giampaolo Rodolà
    Gregory P. Smith
    Guido van Rossum
    Hugo van Kemenade
    Hynek Schlawack
    Inada Naoki
    Irit Katriel
    Ivan Levkivskyi
    Jason R. Coombs
    Jelle Zijlstra
    Jeremy Kloth
    Jesús Cea
    Joannah Nanjekye
    Julien Palard
    Karthikeyan Singaravelan
    Ken Jin
    Kumar Aditya
    Kurt B. Kaiser
    Kushal Das
    Kyle Stanley
    Larry Hastings
    Łukasz Langa
    Lysandros Nikolaou
    Marc-André Lemburg
    Mariatta
    Mark Dickinson
    Mark Shannon
    Nathaniel J. Smith
    Ned Deily
    Neil Schemenauer
    Alyssa Coghlan
    Pablo Galindo
    Paul Ganssle
    Paul Moore
    Petr Viktorin
    R. David Murray
    Raymond Hettinger
    Ronald Oussoren
    Senthil Kumaran
    Serhiy Storchaka
    Stefan Behnel
    Stéphane Wirtel
    Steve Dower
    Steven D'Aprano
    Tal Einat
    Terry Jan Reedy
    Thomas Wouters
    Tim Golden
    Tim Peters
    Victor Stinner
    Vinay Sajip
    Yury Selivanov
    Zachary Ware

[1] AoE: Anywhere on Earth.

[2] AoE: Anywhere on Earth.

[3] This repository is private and accessible only to Python Core Developers, administrators, and Python Software Foundation Staff as it contains personal email addresses.

[4] This repository is private and accessible only to Python Core Developers, administrators, and Python Software Foundation Staff as it contains personal email addresses.

PEP: 668
Title: Marking Python base environments as “externally managed”
Author: Geoffrey Thomas <geofft@ldpreload.com>, Matthias Klose <doko@ubuntu.com>, Filipe Laíns <lains@riseup.net>, Donald Stufft <donald@stufft.io>, Tzu-ping Chung <uranusjr@gmail.com>, Stefano Rivera <stefanor@debian.org>, Elana Hashman <ehashman@debian.org>, Pradyun Gedam <pradyunsg@gmail.com>
PEP-Delegate: Paul Moore <p.f.moore@gmail.com>
Discussions-To: https://discuss.python.org/t/10302
Status: Accepted
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 18-May-2021
Post-History: 28-May-2021
Resolution: https://discuss.python.org/t/10302/44

externally-managed-environments

Abstract

A long-standing practical problem for Python users has been conflicts between OS package managers and Python-specific package management tools like pip. These conflicts include both Python-level API incompatibilities and conflicts over file ownership.

Historically, Python-specific package management tools have defaulted to installing packages into an implicit global context. With the standardization and popularity of virtual environments, a better solution for most (but not all) use cases is to use Python-specific package management tools only within a virtual environment.

This PEP proposes a mechanism for a Python installation to communicate to tools like pip that its global package installation context is managed by some means external to Python, such as an OS package manager. It specifies that Python-specific package management tools should neither install nor remove packages into the interpreter's global context, by default, and should instead guide the end user towards using a virtual environment.

It also standardizes an interpretation of the sysconfig schemes so that, if a Python-specific package manager is about to install a package in an interpreter-wide context, it can do so in a manner that will avoid conflicting with the external package manager and reduces the risk of breaking software shipped by the external package manager.

Terminology

A few terms used in this PEP have multiple meanings in the contexts that it spans.
For clarity, this PEP uses the following terms in specific ways:

distro

    Short for "distribution," a collection of various sorts of software, ideally designed to work properly together, including (in contexts relevant to this document) the Python interpreter itself, software written in Python, and software written in other languages. That is, this is the sense used in phrases such as "Linux distro" or "Berkeley Software Distribution."

    A distro can be an operating system (OS) of its own, such as Debian, Fedora, or FreeBSD. It can also be an overlay distribution that installs on top of an existing OS, such as Homebrew or MacPorts.

    This document uses the short term "distro," because the term "distribution" has another meaning in Python packaging contexts: a source or binary distribution package of a single piece of Python language software, that is, in the sense of setuptools.dist.Distribution or "sdist". To avoid confusion, this document does not use the plain term "distribution" at all. In the Python packaging sense, it uses the full phrase "distribution package" or just "package" (see below).

    The provider of a distro - the team or company that collects and publishes the software and makes any needed modifications - is its distributor.

package

    A unit of software that can be installed and used within Python. That is, this refers to what Python-specific packaging tools tend to call a "distribution package" or simply a "distribution"; the colloquial abbreviation "package" is used in the sense of the Python Package Index.

    This document does not use "package" in the sense of an importable name that contains Python modules, though in many cases, a distribution package consists of a single importable package of the same name.

    This document generally does not use the term "package" to refer to units of installation by a distro's package manager (such as .deb or .rpm files). When needed, it uses phrasing such as "a distro's package." (Again, in many cases, a Python package is shipped inside a distro's package named something like python- plus the Python package name.)

Python-specific package manager

    A tool for installing, upgrading, and/or removing Python packages in a manner that conforms to Python packaging standards (such as PEP 376 and PEP 427). The most popular Python-specific package manager is pip[1]; other examples include the old Easy Install command[2] as well as direct usage of a setup.py command.

    (Conda is a bit of a special case, as the conda command can install much more than just Python packages, making it more like a distro package manager in some senses. Since the conda command generally only operates on Conda-created environments, most of the concerns in this document do not apply to conda when acting as a Python-specific package manager.)

distro package manager

    A tool for installing, upgrading, and/or removing a distro's packages in an installed instance of that distro, which is capable of installing Python packages as well as non-Python packages, and therefore generally has its own database of installed software unrelated to PEP 376. Examples include apt, dpkg, dnf, rpm, pacman, and brew. The salient feature is that if a package was installed by a distro package manager, removing or upgrading it in a way that would satisfy a Python-specific package manager will generally leave a distro package manager in an inconsistent state.

    This document also uses phrases like "external package manager" or "system's package manager" to refer to a distro package manager in certain contexts.

shadow

    To shadow an installed Python package is to cause some other package to be preferred for imports without removing any files from the shadowed package. This requires multiple entries on sys.path: if package A 2.0 installs module a.py in one sys.path entry, and package A 1.0 installs module a.py in a later sys.path entry, then import a returns the module from the former, and we say that A 2.0 shadows A 1.0.

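A small, self-contained script demonstrates shadowing (the directories and module contents here are invented for illustration):

    import pathlib
    import sys
    import tempfile

    early = pathlib.Path(tempfile.mkdtemp())
    late = pathlib.Path(tempfile.mkdtemp())
    (early / "a.py").write_text("version = '2.0'\n")
    (late / "a.py").write_text("version = '1.0'\n")

    # The entry earlier on sys.path wins; the 1.0 copy stays on disk.
    sys.path[:0] = [str(early), str(late)]
    import a
    print(a.version)  # prints: 2.0
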
Motivation

Thanks to Python's immense popularity, software distros (by which we mean Linux and other OS distros as well as overlay distros like Homebrew and MacPorts) generally ship Python for two purposes: as a software package to be used in its own right by end users, and as a language dependency for other software in the distro.

For example, Fedora and Debian (and their downstream distros, as well as many others) ship a /usr/bin/python3 binary which provides the python3 command available to end users as well as the #!/usr/bin/python3 shebang for Python-language software included in the distro. Because there are no official binary releases of Python for Linux/UNIX, almost all Python end users on these OSes use the Python interpreter built and shipped with their distro.

The python3 executable available to the users of the distro and the python3 executable available as a dependency for other software in the distro are typically the same binary. This means that if an end user installs a Python package using a tool like pip outside the context of a virtual environment, that package is visible to Python-language software shipped by the distro. If the newly-installed package (or one of its dependencies) is a newer, backwards-incompatible version of a package that was installed through the distro, it may break software shipped by the distro.

This may pose a critical problem for the integrity of distros, which often have package-management tools that are themselves written in Python. For example, it's possible to unintentionally break Fedora's dnf command with a pip install command, making it hard to recover.

This applies both to system-wide installs (sudo pip install) as well as user home directory installs (pip install --user), since packages in either location show up on the sys.path of /usr/bin/python3.

There is a worse problem with system-wide installs: if you attempt to recover from this situation with sudo pip uninstall, you may end up removing packages that are shipped by the system's package manager. In fact, this can even happen if you simply upgrade a package - pip will try to remove the old version of the package, as shipped by the OS. At this point it may not be possible to recover the system to a consistent state using just the software remaining on the system.

Over the past many years, a consensus has emerged that the best way to install Python libraries or applications (when not using a distro's package) is to use a virtual environment. This approach was popularized by the PyPA virtualenv project, and a simple version of that approach is now available in the Python standard library as venv.
Installing a Python package into a virtualenv prevents it from being visible to the unqualified /usr/bin/python3 interpreter and prevents breaking system software.

In some cases, however, it's useful and intentional to install a Python package from outside of the distro that influences the behavior of distro-shipped commands. This is common in the case of software like Sphinx or Ansible which have a mechanism for writing Python-language extensions. A user may want to use their distro's version of the base software (for reasons of paid support or security updates) but install a small extension from PyPI, and they'd want that extension to be importable by the software in their base system.

While this continues to carry the risk of installing a newer version of a dependency than the operating system expects or otherwise negatively affecting the behavior of an application, it does not need to carry the risk of removing files from the operating system. A tool like pip should be able to install packages in some directory on the default sys.path, if specifically requested, without deleting files owned by the system's package manager.

Therefore, this PEP proposes two things.

First, it proposes a way for distributors of a Python interpreter to mark that interpreter as having its packages managed by means external to Python, such that Python-specific tools like pip should not change the installed packages in the interpreter's global sys.path in any way (add, upgrade/downgrade, or remove) unless specifically overridden. It also provides a means for the distributor to indicate how to use a virtual environment as an alternative.

This is an opt-in mechanism: by default, the Python interpreter compiled from upstream sources will not be so marked, and so running pip install with a self-compiled interpreter, or with a distro that has not explicitly marked its interpreter, will work as it always has worked.

Second, it sets the rule that when installing packages to an interpreter's global context (either to an unmarked interpreter, or if overriding the marking), Python-specific package managers should modify or delete files only within the directories of the sysconfig scheme in which they would create files. This permits a distributor of a Python interpreter to set up two directories, one for its own managed packages, and one for unmanaged packages installed by the end user, and ensure that installing unmanaged packages will not delete (or overwrite) files owned by the external package manager.

Rationale

As described in detail in the next section, the first behavior change involves creating a marker file named EXTERNALLY-MANAGED, whose presence indicates that non-virtual-environment package installations are managed by some means external to Python, such as a distro's package manager. This file is specified to live in the stdlib directory in the default sysconfig scheme, which marks the interpreter / installation as a whole, not a particular location on sys.path. The reason for this is that, as identified above, there are two related problems that risk breaking an externally-managed Python: you can install an incompatible new version of a package system-wide (e.g., with sudo pip install), and you can install one in your user account alone, but in a location that is on the standard Python command's sys.path (e.g., with pip install --user). If the marker file were in the system-wide site-packages directory, it would not clearly apply to the second case. The Alternatives section has further discussion of possible locations.

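As a rough sketch of how an installer might consume the marker (the parsing details here are illustrative; the precise file format is given in this PEP's specification):

    import configparser
    import pathlib
    import sysconfig

    def externally_managed_message() -> str | None:
        """Return the distro's message if this Python is externally managed."""
        marker = pathlib.Path(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
        if not marker.is_file():
            return None  # not marked; install tools behave as they do today
        try:
            parser = configparser.ConfigParser(interpolation=None)
            parser.read_string(marker.read_text(encoding="utf-8"))
            return parser.get("externally-managed", "Error")
        except (OSError, configparser.Error):
            # An unreadable or malformed file still marks the environment.
            return "This environment is externally managed."
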
If the marker file were in the system-wide site-packages directory, it
would not clearly apply to the second case. The Alternatives section
has further discussion of possible locations.

The second behavior change takes advantage of the existing sysconfig
setup in distros that have already encountered this class of problem,
and specifically addresses the problem of a Python-specific package
manager deleting or overwriting files that are owned by an external
package manager.

Use cases

The changed behavior in this PEP is intended to "do the right thing"
for as many use cases as possible. In this section, we consider the
changes specified by this PEP for several representative use cases /
contexts. Specifically, we ask about the two behaviors that could be
changed by this PEP:

1. Will a Python-specific installer tool like pip install permit
   installations by default, after implementation of this PEP?
2. If you do run such a tool, should it be willing to delete packages
   shipped by the external (non-Python-specific) package manager for
   that context, such as a distro package manager?

(For simplicity, this section discusses pip as the Python-specific
installer tool, though the analysis should apply equally to any other
Python-specific package management tool.)

This table summarizes the use cases discussed in detail below:

+------+------------------------+----------------------+----------------------+
| Case | Description            | pip install          | Deleting externally- |
|      |                        | permitted            | installed packages   |
|      |                        |                      | permitted            |
+======+========================+======================+======================+
| 1    | Unpatched CPython      | Currently yes;       | Currently yes;       |
|      |                        | stays yes            | stays yes            |
+------+------------------------+----------------------+----------------------+
| 2    | Distro                 | Currently yes;       | Currently yes        |
|      | /usr/bin/python3       | becomes no (assuming | (except on Debian);  |
|      |                        | the distro adds a    | becomes no           |
|      |                        | marker file)         |                      |
+------+------------------------+----------------------+----------------------+
| 3    | Distro Python in venv  | Currently yes;       | There are no         |
|      |                        | stays yes            | externally-installed |
|      |                        |                      | packages             |
+------+------------------------+----------------------+----------------------+
| 4    | Distro Python in venv  | Currently yes;       | Currently no;        |
|      | with                   | stays yes            | stays no             |
|      | --system-site-packages |                      |                      |
+------+------------------------+----------------------+----------------------+
| 5    | Distro Python in       | Currently yes;       | Currently yes;       |
|      | Docker                 | becomes no (assuming | becomes no           |
|      |                        | the distro adds a    |                      |
|      |                        | marker file)         |                      |
+------+------------------------+----------------------+----------------------+
| 6    | Conda environment      | Currently yes;       | Currently yes;       |
|      |                        | stays yes            | stays yes            |
+------+------------------------+----------------------+----------------------+
| 7    | Dev-facing distro      | Currently yes;       | Currently often yes; |
|      |                        | becomes no (assuming | becomes no (assuming |
|      |                        | they add a marker    | they configure       |
|      |                        | file)                | sysconfig as needed) |
+------+------------------------+----------------------+----------------------+
| 8    | Distro building        | Currently yes;       | Currently yes;       |
|      | packages               | can stay yes         | becomes no           |
+------+------------------------+----------------------+----------------------+
| 9    | PYTHONHOME copied from | Currently yes;       | Currently yes;       |
|      | a distro Python stdlib | becomes no           | becomes no           |
+------+------------------------+----------------------+----------------------+
| 10   | PYTHONHOME copied from | Currently yes;       | Currently yes;       |
|      | upstream Python stdlib | stays yes            | stays yes            |
+------+------------------------+----------------------+----------------------+

In more detail, the use cases above are:

1. A standard unpatched CPython, without any special configuration of
   or patches to sysconfig and without a marker file. This PEP does
   not change its behavior.

   Such a CPython should (regardless of this PEP) not be installed in
   a way that overlaps any distro-installed Python on the same system.
   For instance, on an OS that ships Python in /usr/bin, you should
   not install a custom CPython built with ./configure --prefix=/usr,
   or it will overwrite some files from the distro and the distro will
   eventually overwrite some files from your installation. Instead,
   your installation should be in a separate directory (perhaps
   /usr/local, /opt, or your home directory).

   Therefore, we can assume that such a CPython has its own stdlib
   directory and its own sysconfig schemes that do not overlap any
   distro-installed Python. So any OS-installed packages are not
   visible or relevant here.

   If there is a concept of "externally-installed" packages in this
   case, it's something outside the OS and generally managed by
   whoever built and installed this CPython. Because the installer
   chose not to add a marker file or modify sysconfig schemes, they're
   choosing the current behavior, and pip install can remove any
   packages available in this CPython.

2. A distro's /usr/bin/python3, either when running pip install as
   root or pip install --user, following our Recommendations for
   distros.

   These recommendations include shipping a marker file in the stdlib
   directory, to prevent pip install by default, and placing
   distro-shipped packages in a location other than the default
   sysconfig scheme, so that pip as root does not write to that
   location.

   Many distros (including Debian, Fedora, and their derivatives) are
   already doing the latter.

   On Debian and derivatives, pip install does not currently delete
   distro-installed packages, because Debian carries a patch to pip to
   prevent this. So, for those distros, this PEP is not a behavior
   change; it simply standardizes that behavior in a way that is no
   longer Debian-specific and can be included into upstream pip.

   (We have seen user reports of externally-installed packages being
   deleted on Debian or a derivative. We suspect this is because the
   user has previously run sudo pip install --upgrade pip and
   therefore now has a version of /usr/bin/pip without the Debian
   patch; standardizing this behavior in upstream package installers
   would address this problem.)

3. A distro Python when used inside a virtual environment (either from
   venv or virtualenv).

   Inside a virtual environment, all packages are owned by that
   environment. Even when pip, setuptools, etc. are installed into the
   environment, they are and should be managed by tools specific to
   that environment; they are not system-managed.

4. A distro Python when used inside a virtual environment with
   --system-site-packages.
   This is like the previous case, but worth calling out explicitly,
   because anything on the global sys.path is visible.

   Currently, the answer to "Will pip delete externally-installed
   packages" is no, because pip has a special case for running in a
   virtual environment and attempting to delete packages outside it.
   After this PEP, the answer remains no, but the reasoning becomes
   more general: system site packages will be outside any of the
   sysconfig schemes used for package management in the environment.

5. A distro Python when used in a single-application container image
   (e.g., a Docker container). In this use case, the risk of breaking
   system software is lower, since generally only a single application
   runs in the container, and the impact is lower, since you can
   rebuild the container and you don't have to struggle to recover a
   running machine. There are also a large number of existing
   Dockerfiles with an unqualified RUN pip install ... statement,
   etc., and it would be good not to break those. So, builders of base
   container images may want to ensure that the marker file is not
   present, even if the underlying OS ships one by default.

   There is a small behavior change: currently, pip run as root will
   delete externally-installed packages, but after this PEP it will
   not. We don't propose a way to override this. However, since the
   base image is generally minimal, there shouldn't be much of a use
   case for simply uninstalling packages (especially without using the
   distro's own tools). The common case is when pip wants to upgrade a
   package, which previously would have deleted the old version
   (except on Debian). After this change, the old version will still
   be on disk, but pip will still shadow externally-installed
   packages, and we believe this to be sufficient for this not to be a
   breaking change in practice - a Python import statement will still
   get you the newly-installed package.

   If it becomes necessary to have a way to do this, we suggest that
   the distro should document a way for the installer tool to access
   the sysconfig scheme used by the distro itself. See the
   Recommendations for distros section for more discussion.

   It is the view of the authors of this PEP that it's still a good
   idea to use virtual environments with distro-installed Python
   interpreters, even in single-application container images. Even
   though they run a single application, that application may run
   commands from the OS that are implemented in Python, and if you've
   installed or upgraded the distro-shipped Python packages using
   Python-specific tools, those commands may break.

6. Conda specifically supports the use of non-conda tools like pip to
   install software not available in the Conda repositories. In this
   context, Conda acts as the external package manager / distro and
   pip as the Python-specific one.

   In some sense, this is similar to the first case, since Conda
   provides its own installation of the Python interpreter.

   We don't believe this PEP requires any changes to Conda, and
   versions of pip that have implemented the changes in this PEP will
   continue to behave as they currently do inside Conda environments.
   (That said, it may be worth considering whether to use separate
   sysconfig schemes for pip-installed and Conda-installed software,
   for the same reasons it's a good idea for other distros.)
By a \"developer-facing distro,\" we mean a specific type of distro\n where direct users of Python or other languages in the distro are\n expected or encouraged to make changes to the distro itself if they\n wish to add libraries. Common examples include private \"monorepos\"\n at software development companies, where a single repository builds\n both third-party and in-house software, and the direct users of the\n distro's Python interpreter are generally software developers\n writing said in-house software. User-level package managers like\n Nixpkgs may also count, because they encourage users of Nix who are\n Python developers to package their software for Nix.\n\n In these cases, the distro may want to respond to an attempted\n pip install with guidance encouraging use of the distro's own\n facilities for adding new packages, along with a link to\n documentation.\n\n If the distro supports/encourages creating a virtual environment\n from the distro's Python interpreter, there may also be custom\n instructions for how to properly set up a virtual environment (as\n for example Nixpkgs does).\n\n8. When building distro Python packages for a distro Python (case 2),\n it may be useful to have pip install be usable as part of the\n distro's package build process. (Consider, for instance, building a\n python-xyz RPM by using pip install . inside an sdist / source\n tarball for xyz.) The distro may also want to use a more targeted\n but still Python-specific installation tool such as installer.\n\n For this case, the build process will need to find some way to\n suppress the marker file to allow pip install to work, and will\n probably need to point the Python-specific tool at the distro's\n sysconfig scheme instead of the shipped default. See the\n Recommendations for distros section for more discussion on how to\n implement this.\n\n As a result of this PEP, pip will no longer be able to remove\n packages already on the system. However, this behavior change is\n fine because a package build process should not (and generally\n cannot) include instructions to delete some other files on the\n system; it can only package up its own files.\n\n9. A distro Python used with PYTHONHOME to set up an alternative Python\n environment (as opposed to a virtual environment), where PYTHONHOME\n is set to some directory copied directly from the distro Python\n (e.g., cp -a /usr/lib/python3.x pyhome/lib).\n\n Assuming there are no modifications, then the behavior is just like\n the underlying distro Python (case 2). So there are behavior\n changes - you can no longer pip install by default, and if you\n override it, it will no longer delete externally-installed packages\n (i.e., Python packages that were copied from the OS and live in the\n OS-managed sys.path entry).\n\n This behavior change seems to be defensible, in that if your\n PYTHONHOME is a straight copy of the distro's Python, it should\n behave like the distro's Python.\n\n10. 
10. A distro Python (or any Python interpreter) used with a PYTHONHOME
    taken from a compatible unmodified upstream Python.

    Because the behavior changes in this PEP are keyed off of files in
    the standard library (the marker file in stdlib and the behavior
    of the sysconfig module), the behavior is just like an unmodified
    upstream CPython (case 1).

Specification

Marking an interpreter as using an external package manager

Before a Python-specific package installer (that is, a tool such as
pip - not an external tool such as apt) installs a package into a
certain Python context, it should make the following checks by
default:

1. Is it running outside of a virtual environment? It can determine
   this by whether sys.prefix == sys.base_prefix (but see Backwards
   Compatibility).
2. Is there an EXTERNALLY-MANAGED file in the directory identified by
   sysconfig.get_path("stdlib", sysconfig.get_default_scheme())?

If both of these conditions are true, the installer should exit with
an error message indicating that package installations into this
Python interpreter's directories are disabled outside of a virtual
environment.

The installer should have a way for the user to override these rules,
such as a command-line flag --break-system-packages. This option
should not be enabled by default and should carry some connotation
that its use is risky.

The EXTERNALLY-MANAGED file is an INI-style metadata file intended to
be parsable by the standard library configparser module. If the file
can be parsed by configparser.ConfigParser(interpolation=None) using
the UTF-8 encoding, and it contains a section [externally-managed],
then the installer should look for an error message specified in the
file and output it as part of its error. If the first element of the
tuple returned by locale.getlocale(locale.LC_MESSAGES), i.e., the
language code, is not None, it should look for the error message as
the value of a key named Error- followed by the language code. If that
key does not exist, and if the language code contains underscore or
hyphen, it should look for a key named Error- followed by the portion
of the language code before the underscore or hyphen. If it cannot
find either of those, or if the language code is None, it should look
for a key simply named Error.

If the installer cannot find an error message in the file (either
because the file cannot be parsed or because no suitable error key
exists), then the installer should just use a pre-defined error
message of its own, which should suggest that the user create a
virtual environment to install packages.
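Putting the two checks and the message lookup together, an installer's
logic might look roughly like the following sketch. It assumes Python
3.10 or later, where sysconfig.get_default_scheme() is public, and the
fallback message is an invented placeholder that a real tool would
word itself:

    import configparser
    import locale
    import sys
    import sysconfig
    from pathlib import Path

    def externally_managed_message():
        """Return the distro's message, or None if installs are allowed."""
        # Check 1: skip the marker inside a virtual environment (see
        # Backwards Compatibility for very old virtualenvs).
        if sys.prefix != sys.base_prefix:
            return None
        # Check 2: look for the marker next to the standard library.
        stdlib = sysconfig.get_path("stdlib", sysconfig.get_default_scheme())
        marker = Path(stdlib, "EXTERNALLY-MANAGED")
        if not marker.is_file():
            return None
        fallback = "Use a virtual environment: python3 -m venv path/to/venv"
        try:
            parser = configparser.ConfigParser(interpolation=None)
            parser.read_string(marker.read_text(encoding="utf-8"))
            section = parser["externally-managed"]
        except (OSError, UnicodeDecodeError, configparser.Error, KeyError):
            return fallback
        lang = locale.getlocale(locale.LC_MESSAGES)[0]
        keys = ["Error"]
        if lang is not None:
            # Most specific first: e.g. Error-de_DE, then Error-de.
            keys = [f"Error-{lang}",
                    f"Error-{lang.split('_')[0].split('-')[0]}",
                    "Error"]
        for key in keys:
            if key in section:
                return section[key]
        return fallback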
Software distributors who have a non-Python-specific package manager
that manages libraries in the sys.path of their Python package should,
in general, ship an EXTERNALLY-MANAGED file in their standard library
directory. For instance, Debian may ship a file in
/usr/lib/python3.9/EXTERNALLY-MANAGED consisting of something like

    [externally-managed]
    Error=To install Python packages system-wide, try apt install
     python3-xyz, where xyz is the package you are trying to
     install.

     If you wish to install a non-Debian-packaged Python package,
     create a virtual environment using python3 -m venv path/to/venv.
     Then use path/to/venv/bin/python and path/to/venv/bin/pip. Make
     sure you have python3-full installed.

     If you wish to install a non-Debian packaged Python application,
     it may be easiest to use pipx install xyz, which will manage a
     virtual environment for you. Make sure you have pipx installed.

     See /usr/share/doc/python3.9/README.venv for more information.

which provides useful and distro-relevant information to a user trying
to install a package. Optionally, translations can be provided in the
same file:

    Error-de_DE=Wenn ist das Nunstück git und Slotermeyer?

     Ja! Beiherhund das Oder die Virtualenvironment gersput!

In certain contexts, such as single-application container images that
aren't updated after creation, a distributor may choose not to ship an
EXTERNALLY-MANAGED file, so that users can install whatever they like
(as they can today) without having to manually override this rule.

Writing to only the target sysconfig scheme

Usually, a Python package installer installs to directories in a
scheme returned by the sysconfig standard library package. Ordinarily,
this is the scheme returned by sysconfig.get_default_scheme(), but
based on configuration (e.g. pip install --user), it may use a
different scheme.

Whenever the installer is installing to a sysconfig scheme, this PEP
specifies that the installer should never modify or delete files
outside of that scheme. For instance, if it's upgrading a package, and
the package is already installed in a directory outside that scheme
(perhaps in a directory from another scheme), it should leave the
existing files alone.

If the installer does end up shadowing an existing installation during
an upgrade, we recommend that it produces a warning at the end of its
run.

If the installer is installing to a location outside of a sysconfig
scheme (e.g., pip install --target), then this subsection does not
apply.
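In practice, an installer can derive the set of directories it may
touch directly from the target scheme. A simplified sketch, assuming
absolute, already-normalized paths:

    import os.path
    import sysconfig

    # The directories an installer may write to when targeting the
    # default scheme; another scheme yields a different set.
    scheme = sysconfig.get_default_scheme()
    writable = [sysconfig.get_path(key, scheme)
                for key in ("purelib", "platlib", "scripts", "data")]

    def may_modify(path):
        # Refuse to modify or delete anything outside the target
        # scheme's directories.
        return any(os.path.commonpath([path, root]) == root
                   for root in writable)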
Recommendations for distros

This section is non-normative. It provides best practices we believe
distros should follow unless they have a specific reason otherwise.

Mark the installation as externally managed

Distros should create an EXTERNALLY-MANAGED file in their stdlib
directory.

Guide users towards virtual environments

The file should contain a useful and distro-relevant error message
indicating both how to install system-wide packages via the distro's
package manager and how to set up a virtual environment. If your
distro is often used by users in a state where the python3 command is
available (and especially where pip or get-pip is available) but
python3 -m venv does not work, the message should indicate clearly how
to make python3 -m venv work properly.

Consider packaging pipx, a tool for installing Python-language
applications, and suggesting it in the error. pipx automatically
creates a virtual environment for that application alone, which is a
much better default for end users who want to install some
Python-language software (which isn't available in the distro) but are
not themselves Python users. Packaging pipx in the distro avoids the
irony of instructing users to pip install --user
--break-system-packages pipx to avoid breaking system packages.
Consider arranging things so your distro's package / environment for
Python for end users (e.g., python3 on Fedora or python3-full on
Debian) depends on pipx.

Keep the marker file in container images

Distros that produce official images for single-application containers
(e.g., Docker container images) should keep the EXTERNALLY-MANAGED
file, preferably in a way that makes it not go away if a user of that
image installs package updates inside their image (think
RUN apt-get dist-upgrade).

Create separate distro and local directories

Distros should place two separate paths on the system interpreter's
sys.path, one for distro-installed packages and one for packages
installed by the local system administrator, and configure
sysconfig.get_default_scheme() to point at the latter path. This
ensures that tools like pip will not modify distro-installed packages.
The path for the local system administrator should come before the
distro path on sys.path so that local installs take preference over
distro packages.

For example, Fedora and Debian (and their derivatives) both implement
this split by using /usr/local for locally-installed packages and /usr
for distro-installed packages. Fedora uses
/usr/local/lib/python3.x/site-packages vs.
/usr/lib/python3.x/site-packages. (Debian uses
/usr/local/lib/python3/dist-packages vs.
/usr/lib/python3/dist-packages as an additional layer of separation
from a locally-compiled Python interpreter: if you build and install
upstream CPython in /usr/local/bin, it will look at
/usr/local/lib/python3/site-packages, and Debian wishes to make sure
that packages installed via the locally-built interpreter don't show
up on sys.path for the distro interpreter.)

Note that the /usr/local vs. /usr split is analogous to how the PATH
environment variable typically includes /usr/local/bin:/usr/bin and
non-distro software installs to /usr/local by default. This split is
recommended by the Filesystem Hierarchy Standard.

There are two ways you could do this. One is, if you are building and
packaging Python libraries directly (e.g., your packaging helpers
unpack a PEP 517-built wheel or call setup.py install), arrange for
those tools to use a directory that is not in a sysconfig scheme but
is still on sys.path.

The other is to arrange for the default sysconfig scheme to change
when running inside a package build versus when running on an
installed system. The sysconfig customization hooks from bpo-43976
should make this easy (once accepted and implemented): make your
packaging tool set an environment variable or some other detectable
configuration, and define a get_preferred_schemes function to return a
different scheme when called from inside a package build. Then you can
use pip install as part of your distro packaging.
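A sketch of what such a hook could look like follows. The
DISTRO_PACKAGE_BUILD variable and the posix_distro scheme name are
illustrative only, and the exact integration point into sysconfig is
whatever bpo-43976 ends up specifying:

    import os

    def get_preferred_schemes():
        # Assumed build-time marker set by the distro's packaging tool.
        if os.environ.get("DISTRO_PACKAGE_BUILD"):
            # Inside a package build: write to the distro-owned
            # directories (e.g. /usr/lib/python3.x/site-packages).
            prefix_scheme = "posix_distro"
        else:
            # On an installed system: write to the local-admin
            # directories (e.g. /usr/local/lib/python3.x/site-packages).
            prefix_scheme = "posix_local"
        return {"prefix": prefix_scheme,
                "home": "posix_home",
                "user": "posix_user"}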
We propose adding a --scheme=... option to instruct pip to run against
a specific scheme. (See Implementation Notes below for how pip
currently determines schemes.) Once that's available, for local
testing and possibly for actual packaging, you would be able to run
something like pip install --scheme=posix_distro to explicitly install
a package into your distro's location (bypassing
get_preferred_schemes). One could also, if absolutely needed, use
pip uninstall --scheme=posix_distro to use pip to remove packages from
the system-managed directory, which addresses the (hopefully
theoretical) regression in use case 5 in Rationale.

To install packages with pip, you would also need to either suppress
the EXTERNALLY-MANAGED marker file to allow pip to run or to override
it on the command line. You may want to use the same means for
suppressing the marker file in build chroots as you do in container
images.

The advantage of setting these up to be automatic (suppressing the
marker file in your build environment and having get_preferred_schemes
automatically return your distro's scheme) is that an unadorned
pip install will work inside a package build, which generally means
that an unmodified upstream build script that happens to internally
call pip install will do the right thing. You can, of course, just
ensure that your packaging process always calls
pip install --scheme=posix_distro --break-system-packages, which would
work too.

The best approach here depends a lot on your distro's conventions and
mechanisms for packaging.

Similarly, the sysconfig paths that are not for importable Python
code - that is, include, platinclude, scripts, and data - should also
have two variants, one for use by distro-packaged software and one for
use for locally-installed software, and the distro should be set up
such that both are usable. For instance, a typical FHS-compliant
distro will use /usr/local/include for the default scheme's include
and /usr/include for distro-packaged headers and place both on the
compiler's search path, and it will use /usr/local/bin for the default
scheme's scripts and /usr/bin for distro-packaged entry points and
place both on $PATH.

Backwards Compatibility

All of these mechanisms are proposed for new distro releases and new
versions of tools like pip only.

In particular, we strongly recommend that distros with a concept of
major versions only add the marker file or change sysconfig schemes in
a new major version; otherwise there is a risk that, on an existing
system, software installed via a Python-specific package manager now
becomes unmanageable (without an override option). For a
rolling-release distro, if possible, only add the marker file or
change sysconfig schemes in a new Python minor version.

One particular backwards-compatibility difficulty for package
installation tools is likely to be managing environments created by
old versions of virtualenv which have the latest version of the tool
installed. A "virtual environment" now has a fairly precise
definition: it uses the pyvenv.cfg mechanism, which causes
sys.base_prefix != sys.prefix. It is possible, however, that a user
may have an old virtual environment created by an older version of
virtualenv; as of this writing, pip supports Python 3.6 onwards, which
is in turn supported by virtualenv 15.1.0 onwards, so this scenario is
possible. In older versions of virtualenv, the mechanism is instead to
set a new attribute, sys.real_prefix, and it does not use the standard
library support for virtual environments, so sys.base_prefix is the
same as sys.prefix.
So the logic for robustly detecting a virtual environment is something
like:

    import sys

    def is_virtual_environment():
        return sys.base_prefix != sys.prefix or hasattr(sys, "real_prefix")

Security Implications

The purpose of this feature is not to implement a security boundary;
it is to discourage well-intended changes from unexpectedly breaking a
user's environment. That is to say, the reason this PEP restricts
pip install outside a virtual environment is not that it's a security
risk to be able to do so; it's that "There should be one--and
preferably only one --obvious way to do it," and that way should be
using a virtual environment. pip install outside a virtual environment
is rather too obvious for what is almost always the wrong way to do
it.

If there is a case where a user should not be able to sudo pip install
or pip install --user and add files to sys.path for security reasons,
that needs to be implemented either via access control rules on what
files the user can write to or an explicitly secured sys.path for the
program in question. Neither of the mechanisms in this PEP should be
interpreted as a way to address such a scenario.

For those reasons, an attempted install with a marker file present is
not a security incident, and there is no need to raise an auditing
event for it. If the calling user legitimately has access to
sudo pip install or pip install --user, they can accomplish the same
installation entirely outside of Python; if they do not legitimately
have such access, that's a problem outside the scope of this PEP.

The marker file itself is located in the standard library directory,
which is a trusted location (i.e., anyone who can write to the marker
file used by a particular installer could, presumably, run arbitrary
code inside the installer). Therefore, there is generally no need to
filter out terminal escape sequences or other potentially-malicious
content in the error message.

Alternatives

There are a number of similar proposals we considered that this PEP
rejects or defers, largely to preserve the behavior in the
case-by-case analysis in Rationale.

Marker file

Should the marker file be in sys.path, marking a particular directory
as not to be written to by a Python-specific package manager? This
would help with the second problem addressed by this PEP (not
overwriting or deleting distro-owned files) but not the first
(incompatible installs). A directory-specific marker in
/usr/lib/python3.x/site-packages would not discourage installations
into either /usr/local/lib/python3.x/site-packages or
~/.local/lib/python3.x/site-packages, both of which are on sys.path
for /usr/bin/python3. In other words, the marker file should not be
interpreted as marking a single directory as externally managed (even
though it happens to be in a directory on sys.path); it marks the
entire Python installation as externally managed.

Another variant of the above: should the marker file be in sys.path,
where if it can be found in any directory in sys.path, it marks the
installation as externally managed? An apparent advantage of this
approach is that it automatically disables itself in virtual
environments. Unfortunately, this has the wrong behavior with a
--system-site-packages virtual environment, where the system-wide
sys.path is visible but package installations are allowed.
(It could work if the rule of exempting virtual environments is
preserved, but that seems to have no advantage over the current
scheme.)

Should the marker just be a new attribute of a sysconfig scheme? There
is some conceptual cleanliness to this, except that it's hard to
override. We want to make it easy for container images, package build
environments, etc. to suppress the marker file. A file that you can
remove is easy; code in sysconfig is much harder to modify.

Should the file be in /etc? No, because again, it refers to a specific
Python installation. A user who installs their own Python may well
want to install packages within the global context of that
interpreter.

Should the configuration setting be in pip.conf or distutils.cfg?
Apart from the above objections about marking an installation, this
mechanism isn't specific to either of those tools. (It seems
reasonable for pip to also implement a configuration flag for users to
prevent themselves from performing accidental non-virtual-environment
installs in any Python installation, but that is outside the scope of
this PEP.)

Should the file be TOML? TOML is gaining popularity for packaging (see
e.g. PEP 517) but does not yet have an implementation in the standard
library. Strictly speaking, this isn't a blocker - distros need only
write the file, not read it, so they don't need a TOML library (the
file will probably be written by hand, regardless of format), and
packaging tools likely have a TOML reader already. However, the INI
format is currently used for various other forms of packaging metadata
(e.g., pydistutils.cfg and setup.cfg), meets our needs, and is
parsable by the standard library, and the pip maintainers expressed a
preference to avoid using TOML for this yet.

Should the file be email.message-style? While this format is also used
for packaging metadata (e.g. sdist and wheel metadata) and is also
parsable by the standard library, it doesn't handle multi-line entries
quite as clearly, and that is our primary use case.

Should the marker file be executable Python code that evaluates
whether installation should be allowed or not? Apart from the concerns
above about having the file in sys.path, we have a concern that making
it executable is committing to too powerful of an API and risks making
behavior harder to understand. (Note that the get_default_scheme hook
of bpo-43976 is in fact executable, but that code needs to be supplied
when the interpreter is built; it isn't intended to be supplied
post-build.)

When overriding the marker, should a Python-specific package manager
be disallowed from shadowing a package installed by the external
package manager (i.e., installing modules of the same name)? This
would minimize the risk of breaking system software, but it's not
clear it's worth the additional user experience complexity. There are
legitimate use cases for shadowing system packages, and an additional
command-line option to permit it would be more confusing. Meanwhile,
not passing that option wouldn't eliminate the risk of breaking system
software, which may be relying on a try: import xyz failing, finding a
limited set of entry points, etc. Communicating this distinction seems
difficult. We think it's a good idea for Python-specific package
managers to print a warning if they shadow a package, but we think
it's not worth disabling it by default.

Why not use the INSTALLER file from PEP 376 to determine who installed
a package and whether it can be removed?
First, it's specific to a particular package (it's in the package's
dist-info directory), so like some of the alternatives above, it
doesn't provide information on an entire environment and whether
package installations are permissible. PEP 627 also updates PEP 376 to
prevent programmatic use of INSTALLER, specifying that the file is "to
be used for informational purposes only. [...] Our goal is supporting
interoperating tools, and basing any action on which tool happened to
install a package runs counter to that goal." Finally, as PEP 627
envisions, there are legitimate use cases for one tool knowing how to
handle packages installed by another tool; for instance, conda can
safely remove a package installed by pip into a Conda environment.

Why does the specification give no means for disabling package
installations inside a virtual environment? We can't see a
particularly strong use case for it (at least not one related to the
purposes of this PEP). If you need it, it's simple enough to
pip uninstall pip inside that environment, which should discourage at
least unintentional changes to the environment (and this specification
makes no provision to disable intentional changes, since after all the
marker file can be easily removed).

System Python

Shouldn't distro software just run with the distro site-packages
directory alone on sys.path and ignore the local system
administrator's site-packages as well as the user-specific one? This
is a worthwhile idea, and various versions of it have been circulating
for a while under the name of "system Python" or "platform Python"
(with a separate "user Python" for end users writing Python or
installing Python software separate from the system). However, it's a
much more involved change. First, it would be a backwards-incompatible
change. As mentioned in the Motivation section, there are valid use
cases for running distro-installed Python applications like Sphinx or
Ansible with locally-installed Python libraries available on their
sys.path. A wholesale switch to ignoring local packages would break
these use cases, and a distro would have to make a case-by-case
analysis of whether an application ought to see locally-installed
libraries or not.

Furthermore, Fedora attempted this change and reverted it, finding,
ironically, that their implementation of the change broke their
package manager. Given that experience, there are clearly details to
be worked out before distros can reliably implement that approach, and
a PEP recommending it would be premature.

This PEP is intended to be a complete and self-contained change that
is independent of a distributor's decision for or against "system
Python" or similar proposals. It is not incompatible with a distro
implementing "system Python" in the future, and even though both
proposals address the same class of problems, there are still
arguments in favor of implementing something like "system Python" even
after implementing this PEP. At the same time, though, this PEP
specifically tries to make a more targeted and minimal change, such
that it can be implemented by distributors who don't expect to adopt
"system Python" (or don't expect to implement it immediately). The
changes in this PEP stand on their own merits and are not an
intermediate step for some future proposal.
This PEP reduces (but does not eliminate) the risk of breaking system
software while minimizing (but not completely avoiding) breaking
changes, which should therefore be much easier to implement than the
full "system Python" idea, which comes with the downsides mentioned
above.

We expect that the guidance in this PEP - that users should use
virtual environments whenever possible and that distros should have
separate sys.path directories for distro-managed and locally-managed
modules - should make further experiments easier in the future. These
may include distributing wholly separate "system" and "user" Python
interpreters, running system software out of a distro-owned virtual
environment or PYTHONHOME (but shipping a single interpreter), or
modifying the entry points for certain software (such as the distro's
package manager) to use a sys.path that only sees distro-managed
directories. Those ideas themselves, however, remain outside the scope
of this PEP.

Implementation Notes

This section is non-normative and contains notes relevant to both the
specification and potential implementations.

Currently, pip does not directly expose a way to choose a target
sysconfig scheme, but it has three ways of looking up schemes when
installing:

pip install

    Calls sysconfig.get_default_scheme(), which is usually (in
    upstream CPython and most current distros) the same as
    get_preferred_scheme('prefix').

pip install --prefix=/some/path

    Calls sysconfig.get_preferred_scheme('prefix').

pip install --user

    Calls sysconfig.get_preferred_scheme('user').

Finally, pip install --target=/some/path writes directly to /some/path
without looking up any schemes.
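For reference, the same lookups can be reproduced interactively; both
functions are public in the sysconfig module as of Python 3.10:

    import sysconfig

    # The scheme used by a plain "pip install":
    print(sysconfig.get_default_scheme())
    # The schemes used by --prefix and --user installs, respectively:
    print(sysconfig.get_preferred_scheme("prefix"))
    print(sysconfig.get_preferred_scheme("user"))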
Debian currently carries a patch to change the default install
location inside a virtual environment, using a few heuristics
(including checking for the VIRTUAL_ENV environment variable), largely
so that the directory used in a virtual environment remains
site-packages and not dist-packages. This does not particularly affect
this proposal, because the implementation of that patch does not
actually change the default sysconfig scheme, and notably does not
change the result of sysconfig.get_path("stdlib").

Fedora currently carries a patch to change the default install
location when not running inside rpmbuild, which they use to implement
the two-system-wide-directories approach. This is conceptually the
sort of hook envisioned by bpo-43976, except implemented as a code
patch to distutils instead of as a changed sysconfig scheme.

The implementation of is_virtual_environment above, as well as the
logic to load the EXTERNALLY-MANAGED file and find the error message
from it, may as well get added to the standard library (sys and
sysconfig, respectively), to centralize their implementations, but
they don't need to be added yet.

References

For additional background on these problems and previous attempts to
solve them, see Debian bug 771794 "pip silently removes/updates system
provided python packages" from 2014, Fedora's 2018 article Making sudo
pip safe about pointing sudo pip at /usr/local (which acknowledges
that the changes still do not make sudo pip completely safe), pip
issues 5605 ("Disable upgrades to existing python modules which were
not installed via pip") and 5722 ("pip should respect /usr/local")
from 2018, and the post-PyCon US 2019 discussion thread Playing nice
with external package managers.

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.

[1] https://pip.pypa.io/en/stable/

[2] https://setuptools.readthedocs.io/en/latest/deprecated/easy_install.html
(Note that the easy_install command was removed in setuptools version
52, released 23 January 2021.)

PEP: 394
Title: The "python" Command on Unix-Like Systems
Author: Kerrick Staley, Alyssa Coghlan, Barry Warsaw, Petr Viktorin,
        Miro Hrončok, Carol Willing
Status: Active
Type: Informational
Created: 02-Mar-2011
Post-History: 04-Mar-2011, 20-Jul-2011, 16-Feb-2012, 30-Sep-2014,
              28-Apr-2018, 26-Jun-2019
Resolution: https://mail.python.org/pipermail/python-dev/2012-February/116594.html

Abstract

This PEP outlines the behavior of Python scripts when the python
command is invoked. Depending on a distribution or system
configuration, python may or may not be installed. If python is
installed its target interpreter may refer to python2 or python3. End
users may be unaware of this inconsistency across Unix-like systems.
This PEP's goal is to reduce user confusion about what python
references and what will be the script's behavior.

The recommendations in the next section of this PEP will outline the
behavior when:

- using virtual environments
- writing cross-platform scripts with shebangs for either python2 or
  python3

The PEP's goal is to clarify the behavior for script end users,
distribution providers, and script maintainers / authors.

Recommendation

Our recommendations are detailed below.
We call out any expectations that these recommendations are based
upon.

For Python runtime distributors

- We expect Unix-like software distributions (including systems like
  macOS and Cygwin) to install the python2 command into the default
  path whenever a version of the Python 2 interpreter is installed,
  and the same for python3 and the Python 3 interpreter.
- When invoked, python2 should run some version of the Python 2
  interpreter, and python3 should run some version of the Python 3
  interpreter.
- If the python command is installed, it is expected to invoke either
  the same version of Python as the python3 command or as the python2
  command.
- Distributors may choose to set the behavior of the python command as
  follows:
  - python2,
  - python3,
  - not providing the python command at all, allowing python to be
    configured by an end user or a system administrator.
- The Python 3.x idle, pydoc, and python-config commands should
  likewise be available as idle3, pydoc3, and python3-config; Python
  2.x versions as idle2, pydoc2, and python2-config. The commands with
  no version number should either invoke the same version of Python as
  the python command, or not be available at all.
- When packaging third party Python scripts, distributors are
  encouraged to change less specific shebangs to more specific ones.
  This ensures software is used with the latest version of Python
  available, and it can remove a dependency on Python 2. The details
  of which specifics to set are left to the distributors, though.
  Example specifics could include:
  - Changing python shebangs to python3 when Python 3.x is supported.
  - Changing python shebangs to python2 when Python 3.x is not yet
    supported.
  - Changing python3 shebangs to python3.8 if the software is built
    with Python 3.8.
- When a virtual environment (created by the PEP 405 venv package or a
  similar tool such as virtualenv or conda) is active, the python
  command should refer to the virtual environment's interpreter and
  should always be available. The python3 or python2 command
  (according to the environment's interpreter version) should also be
  available.

For Python script publishers

- When reinvoking the interpreter from a Python script, querying
  sys.executable to avoid hardcoded assumptions regarding the
  interpreter location remains the preferred approach (see the sketch
  after this list).
- Encourage your end users to use a virtual environment. This makes
  the user's environment more predictable (possibly resulting in fewer
  issues), and helps avoid disrupting their system.
- For scripts that are only expected to be run in an activated virtual
  environment, shebang lines can be written as #!/usr/bin/env python,
  as this instructs the script to respect the active virtual
  environment.
- In cases where the script is expected to be executed outside virtual
  environments, developers will need to be aware of the following
  discrepancies across platforms and installation methods:
  - Older Linux distributions will provide a python command that
    refers to Python 2, and will likely not provide a python2 command.
  - Some newer Linux distributions will provide a python command that
    refers to Python 3.
  - Some Linux distributions will not provide a python command at all
    by default, but will provide a python3 command by default.
- When potentially targeting these environments, developers may either
  use a Python package installation tool that rewrites shebang lines
  for the installed environment, provide instructions on updating
  shebang lines interactively, or else use more specific shebang lines
  that are tailored to the target environment.
- Scripts targeting both "old systems" and systems without the default
  python command need to make a compromise and document this
  situation. Avoiding shebangs (via the console_scripts Entry Points
  ([1]) or similar means) is the recommended workaround for this
  problem.
- Applications designed exclusively for a specific environment (such
  as a container or virtual environment) may continue to use the
  python command name.
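As a concrete illustration of the first point, a script can re-run
work under the exact interpreter currently executing, rather than
whatever python happens to resolve to on PATH; the module name here is
a hypothetical placeholder:

    import subprocess
    import sys

    # Reinvoke the current interpreter, not a "python" found on PATH.
    # "mypkg.worker" is a hypothetical helper module for this sketch.
    subprocess.run([sys.executable, "-m", "mypkg.worker"], check=True)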
For end users of Python

- While far from being universally available, python remains the
  preferred spelling for explicitly invoking Python, as this is the
  spelling that virtual environments make consistently available
  across different platforms and Python installations.
- For software that is not distributed with (or developed for) your
  system, we recommend using a virtual environment, possibly with an
  environment manager like conda or pipenv, to help avoid disrupting
  your system Python installation.

These recommendations are the outcome of the relevant python-dev
discussions in March and July 2011 ([2],[3]), February 2012 ([4]),
September 2014 ([5]), discussion on GitHub in April 2018 ([6]), on
python-dev in February 2019 ([7]), and during the PEP update review in
May/June 2019 ([8]).

History of this PEP

In 2011, the majority of distributions aliased the python command to
Python 2, but some started switching it to Python 3 ([9]). As some of
the former distributions did not provide a python2 command by default,
there was previously no way for Python 2 code (or any code that
invokes the Python 2 interpreter directly rather than via
sys.executable) to reliably run on all Unix-like systems without
modification, as the python command would invoke the wrong interpreter
version on some systems, and the python2 command would fail completely
on others. This PEP originally provided a very simple mechanism to
restore cross-platform support, with minimal additional work required
on the part of distribution maintainers. Simplified, the
recommendation was:

1. The python command was preferred for code compatible with both
   Python 2 and 3 (since it was available on all systems, even those
   that already aliased it to Python 3).
2. The python command should always invoke Python 2 (to prevent
   hard-to-diagnose errors when Python 2 code is run on Python 3).
3. The python2 and python3 commands should be available to specify the
   version explicitly.

However, these recommendations implicitly assumed that Python 2 would
always be available. As Python 2 is nearing its end of life in 2020
(PEP 373, PEP 404), distributions are making Python 2 optional or
removing it entirely. This means either removing the python command or
switching it to invoke Python 3. Some distributors also decided that
their users were better served by ignoring the PEP's original
recommendations, and provided system administrators with the freedom
to configure their systems based on the needs of their particular
environment.

Current Rationale

As of 2019, activating a Python virtual environment (or its functional
equivalent) prior to script execution is one way to obtain a
consistent cross-platform and cross-distribution experience.

Accordingly, publishers can expect users of the software to provide a
suitable execution environment.

Future Changes to this Recommendation

This recommendation will be periodically reviewed over the next few
years, and updated when the core development team judges it
appropriate. As a point of reference, regular maintenance releases for
the Python 2.7 series will continue until January 2020.

Migration Notes

This section does not contain any official recommendations from the
core CPython developers. It's merely a collection of notes regarding
various aspects of migrating to Python 3 as the default version of
Python for a system. They will hopefully be helpful to any
distributions considering making such a change.

- The main barrier to a distribution switching the python command from
  python2 to python3 isn't breakage within the distribution, but
  instead breakage of private third party scripts developed by
  sysadmins and other users. Updating the python command to invoke
  python3 by default indicates that a distribution is willing to break
  such scripts with errors that are potentially quite confusing for
  users that aren't familiar with the backwards incompatible changes
  in Python 3. For example, while the change of print from a statement
  to a builtin function is relatively simple for automated converters
  to handle, the SyntaxError from attempting to use the Python 2
  notation in Python 3 may be confusing for users that are not aware
  of the change:

      $ python3 -c 'print "Hello, world!"'
        File "<string>", line 1
          print "Hello, world!"
          ^
      SyntaxError: Missing parentheses in call to 'print'. Did you mean print("Hello, world!")?

  While this might be obvious for experienced Pythonistas, such
  scripts might even be run by people who are not familiar with Python
  at all. Avoiding breakage of such third party scripts was the key
  reason this PEP used to recommend that python continue to refer to
  python2.

- The error message python: command not found tends to be surprisingly
  actionable, even for people unfamiliar with Python.

- The pythonX.X (e.g. python3.6) commands exist on modern systems, on
  which they invoke specific minor versions of the Python interpreter.
  It can be useful for distribution-specific packages to take
  advantage of these utilities if they exist, since it will prevent
  code breakage if the default minor version of a given major version
  is changed.
  However, scripts intending to be cross-platform should not rely on
  the presence of these utilities, but rather should be tested on
  several recent minor versions of the target major version,
  compensating, if necessary, for the small differences that exist
  between minor versions. This prevents the need for sysadmins to
  install many very similar versions of the interpreter.

- When the pythonX.X binaries are provided by a distribution, the
  python2 and python3 commands should refer to one of those files
  rather than being provided as a separate binary file.

- It is strongly encouraged that distribution-specific packages use
  python3 (or python2) rather than python, even in code that is not
  intended to operate on other distributions. This will reduce
  problems if the distribution later decides to change the version of
  the Python interpreter that the python command invokes, or if a
  sysadmin installs a custom python command with a different major
  version than the distribution default.

- If the above point is adhered to and sysadmins are permitted to
  change the python command, then the python command should always be
  implemented as a link to the interpreter binary (or a link to a
  link) and not vice versa. That way, if a sysadmin does decide to
  replace the installed python file, they can do so without
  inadvertently deleting the previously installed binary.

- Even as the Python 2 interpreter becomes less common, it remains
  reasonable for scripts to continue to use the python3 convention,
  rather than just python.

- If these conventions are adhered to, it will become the case that
  the python command is only executed in an interactive manner as a
  user convenience, or else when using a virtual environment or
  similar mechanism.

Backwards Compatibility

A potential problem can arise if a script adhering to the
python2/python3 convention is executed on a system not supporting
these commands. This is mostly a non-issue, since the sysadmin can
simply create these symbolic links and avoid further problems. It is a
significantly more obvious breakage than the sometimes cryptic errors
that can arise when attempting to execute a script containing Python 2
specific syntax with a Python 3 interpreter or vice versa.
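A minimal sketch of creating such a chain of links, with an example
prefix that a sysadmin would adjust for the system in question:

    import os

    # Build the recommended chain python -> python2 -> python2.7 using
    # relative links, so that replacing "python" later never removes
    # the actual interpreter binary.
    os.chdir("/usr/local/bin")  # example prefix
    os.symlink("python2.7", "python2")
    os.symlink("python2", "python")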
Application to the CPython Reference Interpreter

While technically a new feature, the make install and make bininstall
command in the 2.7 version of CPython were adjusted to create the
following chains of symbolic links in the relevant bin directory (the
final item listed in the chain is the actual installed binary,
preceding items are relative symbolic links):

    python -> python2 -> python2.7
    python-config -> python2-config -> python2.7-config

Similar adjustments were made to the macOS binary installer.

This feature first appeared in the default installation process in
CPython 2.7.3.

The installation commands in the CPython 3.x series already create the
appropriate symlinks. For example, CPython 3.2 creates:

    python3 -> python3.2
    idle3 -> idle3.2
    pydoc3 -> pydoc3.2
    python3-config -> python3.2-config

And CPython 3.3 creates:

    python3 -> python3.3
    idle3 -> idle3.3
    pydoc3 -> pydoc3.3
    python3-config -> python3.3-config
    pysetup3 -> pysetup3.3

The implementation progress of these features in the default
installers was managed on the tracker as issue #12627 ([10]).

Impact on PYTHON* Environment Variables

The choice of target for the python command implicitly affects a
distribution's expected interpretation of the various Python related
environment variables. The use of *.pth files in the relevant
site-packages folder, the "per-user site packages" feature (see
python -m site) or more flexible tools such as virtualenv are all more
tolerant of the presence of multiple versions of Python on a system
than the direct use of PYTHONPATH.

Exclusion of MS Windows

This PEP deliberately excludes any proposals relating to Microsoft
Windows, as devising an equivalent solution for Windows was deemed too
complex to handle here. PEP 397 and the related discussion on the
python-dev mailing list address this issue.

References

[1] The console_scripts Entry Point
(https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point)

[2] Support the /usr/bin/python2 symlink upstream (with bonus grammar
class!)
(https://mail.python.org/pipermail/python-dev/2011-March/108491.html)

[3] Rebooting PEP 394 (aka Support the /usr/bin/python2 symlink
upstream)
(https://mail.python.org/pipermail/python-dev/2011-July/112322.html)

[4] PEP 394 request for pronouncement (python2 symlink in *nix
systems)
(https://mail.python.org/pipermail/python-dev/2012-February/116435.html)

[5] PEP 394 - Clarification of what "python" command should invoke
(https://mail.python.org/pipermail/python-dev/2014-September/136374.html)

[6] PEP 394: Allow the python command to not be installed, and other
minor edits (https://github.com/python/peps/pull/630)

[7] Another update for PEP 394 -- The "python" Command on Unix-Like
Systems
(https://mail.python.org/pipermail/python-dev/2019-February/156272.html)

[8] May 2019 PEP update review
(https://github.com/python/peps/pull/989)

[9] Arch Linux announcement that their "python" link now refers to
Python 3 (https://www.archlinux.org/news/python-is-now-python-3/)

[10] Implement PEP 394 in the CPython Makefile
(https://github.com/python/cpython/issues/56836)

Copyright

This document has been placed in the public domain.

PEP: 611
Title: The one million limit
Author: Mark Shannon
Status: Withdrawn
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Dec-2019
Post-History:

Abstract

This PEP proposes a soft limit of one million (1 000 000), and a
larger hard limit for various aspects of Python code and its
implementation.

The Python language does not specify limits for many of its features.
limit to these values seems to enhance programmer\nfreedom, at least superficially, but in practice the CPython VM and\nother Python virtual machines have implicit limits or are forced to\nassume that the limits are astronomical, which is expensive.\n\nThis PR lists a number of features which are to have a limit of one\nmillion.\n\nFor CPython the hard limit will be eight million (8 000 000).\n\nMotivation\n\nThere are many values that need to be represented in a virtual machine.\nIf no limit is specified for these values, then the representation must\neither be inefficient or vulnerable to overflow. The CPython virtual\nmachine represents values like line numbers, stack offsets and\ninstruction offsets by 32 bit values. This is inefficient, and\npotentially unsafe.\n\nIt is inefficient as actual values rarely need more than a dozen or so\nbits to represent them.\n\nIt is unsafe as malicious or poorly generated code could cause values to\nexceed 2³².\n\nFor example, line numbers are represented by 32 bit values internally.\nThis is inefficient, given that modules almost never exceed a few\nthousand lines. Despite being inefficient, it is still vulnerable to\noverflow as it is easy for an attacker to created a module with billions\nof newline characters.\n\nMemory access is usually a limiting factor in the performance of modern\nCPUs. Better packing of data structures enhances locality and reduces\nmemory bandwidth, at a modest increase in ALU usage (for shifting and\nmasking). Being able to safely store important values in 20 bits would\nallow memory savings in several data structures including, but not\nlimited to:\n\n- Frame objects\n- Object headers\n- Code objects\n\nThere is also the potential for a more efficient instruction format,\nspeeding up interpreter dispatch.\n\nIs this a worthwhile trade off?\n\nThe downside of any form of limit is that it might potentially make\nsomeone's job harder, for example, it may be harder to write a code\ngenerator that keeps the size of modules to one million lines. However,\nit is the author's opinion, having written many code generators, that\nsuch a limit is extremely unlikely to be a problem in practice.\n\nThe upside of these limits is the freedom it grants implementers of\nruntimes, whether CPython, PyPy, or any other implementation, to improve\nperformance. It is the author's belief, that the potential value of even\na 0.1% reduction in the cost of running Python programs globally will\nhugely exceed the cost of modifying a handful of code generators.\n\nRationale\n\nImposing a limit on values such as lines of code in a module, and the\nnumber of local variables, has significant advantages for ease of\nimplementation and efficiency of virtual machines. If the limit is\nsufficiently large, there is no adverse effect on users of the language.\n\nBy selecting a fixed but large limit for these values, it is possible to\nhave both safety and efficiency whilst causing no inconvenience to human\nprogrammers and only very rare problems for code generators.\n\nOne million\n\nThe value \"one million\" is very easy to remember.\n\nThe one million limit is mostly a limit on human generated code, not\nruntime sizes.\n\nOne million lines in a single module is a ridiculous concentration of\ncode; the entire Python standard library is about 2/3rd of a million\nlines, spread over 1600 files.\n\nThe Java Virtual Machine (JVM)[1] specifies a limit of 2¹⁶-1 (65535) for\nmany program elements similar to those covered here. 
This limit enables\nlimited values to fit in 16 bits, which is a very efficient machine\nrepresentation. However, this limit is quite easily exceeded in practice\nby code generators and the author is aware of existing Python code that\nalready exceeds 2¹⁶ lines of code.\n\nThe hard limit of eight million fits into 23 bits which, although not as\nconvenient for machine representation, is still reasonably compact. A\nlimit of eight million is small enough for efficiency advantages (only\n23 bits), but large enough not to impact users (no one has ever written\na module that large).\n\nWhile it is possible that generated code could exceed the limit, it is\neasy for a code generator to modify its output to conform. The author\nhas hit the 64K limit in the JVM on at least two occasions when\ngenerating Java code. The workarounds were relatively straightforward\nand wouldn't have been necessary with a limit of one million bytecodes\nor lines of code.\n\nWhere necessary, the soft limit can increased for those programs that\nexceed the one million limit.\n\nHaving a soft limit of one million provides a warning of problematic\ncode, without causing an error and forcing an immediate fix. It also\nallows dynamic optimizers to use more compact formats without inline\nchecks.\n\nSpecification\n\nThis PR proposes that the following language features and runtime values\nhave a soft limit of one million.\n\n- The number of source code lines in a module\n- The number of bytecode instructions in a code object.\n- The sum of local variables and stack usage for a code object.\n- The number of classes in a running interpreter.\n- The recursion depth of Python code.\n\nIt is likely that memory constraints would be a limiting factor before\nthe number of classes reaches one million.\n\nRecursion depth\n\nThe recursion depth limit only applies to pure Python code. Code written\nin a foreign language, such as C, may consume hardware stack and thus be\nlimited to a recursion depth of a few thousand. It is expected that\nimplementations will raise an exception should the hardware stack get\nclose to its limit. For code that mixes Python and C calls, it is most\nlikely that the hardware limit will apply first. The size of the\nhardware recursion may vary at runtime and will not be visible.\n\nSoft and hard limits\n\nImplementations should emit a warning whenever a soft limit is exceeded,\nunless the hard limit has the same value as the soft limit. When a hard\nlimit is exceeded, then an exception should be raised.\n\nDepending on the implementation, different hard limits might apply. In\nsome cases the hard limit might be below the soft limit. For example,\nmany micropython ports are unlikely to be able to support such large\nlimits.\n\nIntrospecting and modifying the limits\n\nOne or more functions will be provided in the sys module to introspect\nor modify the soft limits at runtime, but the limits may not be raised\nabove the hard limit.\n\nInferred limits\n\nThese limits are not part of the specification, but a limit of less than\none million can be inferred from the limit on the number of bytecode\ninstructions in a code object. 
Because there would be insufficient\ninstructions to load more than one million constants or use more than\none million names.\n\n- The number of distinct names in a code object.\n- The number of constants in a code object.\n\nThe advantages for CPython of imposing these limits:\n\nLine of code in a module and code object restrictions.\n\nWhen compiling source code to bytecode or modifying bytecode for\nprofiling or debugging, an intermediate form is required. By limiting\noperands to 23 bits, instructions can be represented in a compact 64 bit\nform allowing very fast passes over the instruction sequence.\n\nHaving 23 bit operands (24 bits for relative branches) allows\ninstructions to fit into 32 bits without needing additional EXTENDED_ARG\ninstructions. This improves dispatch, as the operand is strictly local\nto the instruction. It is unclear whether this would help performance,\nit is merely an example of what is possible.\n\nThe benefit of restricting the number of lines in a module is primarily\nthe implied limit on bytecodes. It is more important for implementations\nthat it is instructions per code object, not lines per module, that is\nlimited to one million, but it is much easier to explain a one million\nline limit. Having a consistent limit of one million is just easier to\nremember. It is mostly likely, although not guaranteed, that the line\nlimit will be hit first and thus provide a simpler to understand error\nmessage to the developer.\n\nTotal number of classes in a running interpreter\n\nThis limit has to the potential to reduce the size of object headers\nconsiderably.\n\nCurrently objects have a two word header, for objects without references\n(int, float, str, etc.) or a four word header for objects with\nreferences. By reducing the maximum number of classes, the space for the\nclass reference can be reduced from 64 bits to fewer than 32 bits\nallowing a much more compact header.\n\nFor example, a super-compact header format might look like this:\n\n struct header {\n uint32_t gc_flags:6; /* Needs finalisation, might be part of a cycle, etc. */\n uint32_t class_id:26; /* Can be efficiently mapped to address by ensuring suitable alignment of classes */\n uint32_t refcount; /* Limited memory or saturating */\n }\n\nThis format would reduce the size of a Python object without slots, on a\n64 bit machine, from 40 to 16 bytes.\n\nNote that there are two ways to use a 32 bit refcount on a 64 bit\nmachine. One is to limit each sub-interpreter to 32Gb of memory. The\nother is to use a saturating reference count, which would be a little\nbit slower, but allow unlimited memory allocation.\n\nEnforcement\n\nPython implementations are not obliged to enforce the limits. However,\nif a limit can be enforced without hurting performance, then it should\nbe.\n\nIt is anticipated that CPython will enforce the limits as follows:\n\n- The number of source code lines in a module: version 3.9 onward.\n- The number of bytecode instructions in a code object: 3.9 onward.\n- The sum of local variables and stack usage for a code object: 3.9\n onward.\n- The number of classes in a running interpreter: probably 3.10\n onward, maybe warning in 3.9.\n\nHard limits in CPython\n\nCPython will enforce a hard limit on all the above values. The value of\nthe hard limit will be 8 million.\n\nIt is hypothetically possible that some machine generated code exceeds\none or more of the above limits. 
The author believes that to be\nincredibly unlikely and easily fixed by modifying the output stage of\nthe code generator.\n\nWe would like to gain the benefit from the above limits for performance\nas soon as possible. To that end, CPython will start applying limits\nfrom version 3.9 onward. To ease the transition and minimize breakage,\nthe initial limits will be 16 million, reducing to 8 million in a later\nversion.\n\nBackwards Compatibility\n\nThe actual hard limits enforced by CPython will be:\n\n+---------------+--------------+\n| Version | Hard limit |\n+===============+==============+\n| 3.9 | 16 million |\n+---------------+--------------+\n| 3.10 onward | 8 million |\n+---------------+--------------+\n\nGiven the rarity of code generators that would exceed the one million\nlimits, and the environments in which they are typically used, it seems\nreasonable to start issuing warnings in 3.9 if any limited quantity\nexceeds one million.\n\nHistorically the recursion limit has been set at 1000. To avoid breaking\ncode that implicitly relies on the value being small, the soft recursion\nlimit will be increased gradually, as follows:\n\n+---------+-------------+\n| Version | Soft limit |\n+=========+=============+\n| 3.9 | 4 000 |\n+---------+-------------+\n| 3.10 | 16 000 |\n+---------+-------------+\n| 3.11 | 64 000 |\n+---------+-------------+\n| 3.12 | 125 000 |\n+---------+-------------+\n| 3.13 | 1 million |\n+---------+-------------+\n\nThe hard limit will be set to 8 million immediately.\n\nOther implementations\n\nImplementations of Python other than CPython have different purposes, so\ndifferent limits might be appropriate. This is acceptable, provided the\nlimits are clearly documented.\n\nGeneral purpose implementations\n\nGeneral purpose implementations, such as PyPy, should use the one\nmillion limit. If maximum compatibility is a goal, then they should also\nfollow CPython's behaviour for 3.9 to 3.11.\n\nSpecial purpose implementations\n\nSpecial purpose implementations may use lower limits, as long as they\nare clearly documented. An implementation designed for embedded systems,\nfor example MicroPython, might impose limits as low as a few thousand.\n\nSecurity Implications\n\nMinimal. This reduces the attack surface of any Python virtual machine\nby a small amount.\n\nReference Implementation\n\nNone, as yet. This will be implemented in CPython, once the PEP has been\naccepted.\n\nRejected Ideas\n\nBeing able to modify the hard limits upwards at compile time was\nsuggested by Tal Einat. 
This is rejected as the current limits of 2³²\nhave not been an issue, and the practical advantages of allowing limits\nbetween 2²⁰ and 2³² seem slight compared to the additional code\ncomplexity of supporting such a feature.\n\nOpen Issues\n\nNone, as yet.\n\nReferences\n\nhttps://docs.oracle.com/javase/specs/jvms/se8/jvms8.pdf\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive.\n\n[1] The Java Virtual Machine specification"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.002947"},"created":{"kind":"timestamp","value":"2019-12-05T00:00:00","string":"2019-12-05T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0611/\",\n \"authors\": [\n \"Mark Shannon\"\n ],\n \"pep_number\": \"0611\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":546,"cells":{"id":{"kind":"string","value":"0632"},"text":{"kind":"string","value":"PEP: 632 Title: Deprecate distutils module Author: Steve Dower\n Discussions-To:\nhttps://discuss.python.org/t/pep-632-deprecate-distutils-module/5134\nStatus: Final Type: Standards Track Content-Type: text/x-rst Created:\n03-Sep-2020 Python-Version: 3.10 Post-History: 03-Sep-2020, 22-Jan-2021\nResolution:\nhttps://mail.python.org/archives/list/python-dev@python.org/thread/TXU6TVOMBLQU3SV57DMMOA5Y2E67AW7P/\n\nAbstract\n\nThe distutils module[1] has for a long time recommended using the\nsetuptools package[2] instead. Setuptools has recently integrated a\ncomplete copy of distutils and is no longer dependent on the standard\nlibrary[3]. Pip has been silently replacing distutils with setuptools\nwhen installing packages for a long time already, and the distutils\ndocumentation has stated that it is being phased out since 2014 (or\nearlier). It is time to remove it from the standard library.\n\nMotivation\n\ndistutils[4] is a largely undocumented and unmaintained collection of\nutilities for packaging and distributing Python packages, including\ncompilation of native extension modules. It defines a configuration\nformat that describes a Python distribution and provides the tools to\nconvert a directory of source code into a source distribution, and some\nforms of binary distribution. Because of its place in the standard\nlibrary, many updates can only be released with a major release, and\nusers cannot rely on particular fixes being available.\n\nsetuptools[5] is a better documented and well maintained enhancement\nbased on distutils. While it provides very similar functionality, it is\nmuch better able to support users on earlier Python releases, and can\nrespond to bug reports more quickly. A number of platform-specific\nenhancements already exist in setuptools that have not been added to\ndistutils, and there is been a long-standing recommendation in the\ndistutils documentation to prefer setuptools.\n\nHistorically, setuptools has extended distutils using subclassing and\nmonkeypatching, but has now taken a copy of the underlying code.[6] As a\nresult, the second last major dependency on distutils is gone and there\nis no need to keep it in the standard library.\n\nThe final dependency on distutils is CPython itself, which uses it to\nbuild native extension modules in the standard library (except on\nWindows). 
Because this is a CPython build-time dependency, it is\npossible to continue to use distutils for this specific case without it\nbeing part of the standard library.\n\nDeprecation and removal will make it obvious that issues should be fixed\nin the setuptools project, and will reduce a source of bug reports and\nunnecessary test maintenance. It will also help promote the development\nof alternative build backends, which can now be supported more easily\nthanks to PEP 517.\n\nSpecification\n\nIn Python 3.10 and 3.11, distutils will be formally marked as\ndeprecated. All known issues will be closed at this time.\nimport distutils will raise a deprecation warning. New issues that would\nbe considered release blocking may still be fixed, but support for new\ntools or platforms will not be added.\n\nDuring Python 3.10 and 3.11, uses of distutils within the standard\nlibrary may change to use alternative APIs.\n\nIn Python 3.12, distutils will no longer be installed by make install or\nany of the first-party distribution. Third-party redistributors should\nno longer include distutils in their bundles or repositories.\n\nThis PEP makes no specification on migrating the parts of the CPython\nbuild process that currently use distutils. Depending on contributions,\nthis migration may occur at any time.\n\nAfter Python 3.12 is started and when the CPython build process no\nlonger depends on distutils being in the standard library, the entire\nLib/distutils directory and Lib/test/test_distutils.py file will be\nremoved from the repository.\n\nOther references to distutils will be cleaned up. As of Python 3.9's\ninitial release, the following modules have references in code or\ncomments:\n\n- Lib/ctypes/util.py\n- Lib/site.py\n- Lib/sysconfig.py\n- Lib/_aix_support.py\n- Lib/_bootsubprocess.py\n- Lib/_osx_support.py\n- Modules/_decimal/tests/formathelper.py\n\nThe following Tools in CPython also refer to distutils. Note that none\nof these are installed with CPython:\n\n- PC/layout (references will be removed)\n- Tools/msi (references will be removed)\n- Tools/peg_generator (will be adapted to a different build tool)\n- Tools/test2to3 (example project will be removed)\n\nAs the distutils code is already included in setuptools, there is no\nneed to republish it in any other form. Those who require access to the\nfunctionality should use setuptools or an alternative build backend.\n\nBackwards Compatibility\n\nCode that imports distutils will no longer work from Python 3.12.\n\nThe suggested migration path is to use the equivalent (though not\nidentical) imports from setuptools (see[7]), or to migrate to an\nalternative build backend (see PEP 517).\n\nCode already exists in setuptools to transparently switch setup.py files\nusing distutils onto their equivalents, and so most working build\nscripts are already known to work with setuptools. Such scripts may need\nto update their import statements. Consult the setuptools documentation\nfor specific migration advice.[8]\n\nSome projects use alternate sets of patches over distutils, notably,\nnumpy.distutils.[9] Projects that we know are doing this have been\ninformed.\n\nMany build scripts use custom commands or narrowly scoped patches. 
As\nthese packages are already subject to setuptools overriding distutils,\nwe expect minimal disruption as a result of distutils being removed.\nScripts may still need to be updated to avoid importing distutils.\n\nReference Implementation\n\nsetuptools version 48 includes the complete copy of distutils, and as\nsuch is no longer dependent on the standard library's copy. Most\nimplementation issues they have faced are due to the continuing\nexistence of distutils in the standard library, and so removal will\nimprove the stability of their implementation.\n\nThere is not yet a reference implementation for the removal of distutils\nfrom the standard library, nor is there an implementation for CPython's\nnative module builds without relying on the standard library copy of\ndistutils.\n\nMigration Advice\n\nNote\n\nThis section suggests some alternative replacements for popular\nfunctionality that is being formally deprecated with this PEP. It is\ncurrent at time of writing, but is not kept up to date.\n\nFor these modules or types, setuptools is the best substitute:\n\n- distutils.ccompiler\n- distutils.cmd.Command\n- distutils.command\n- distutils.config\n- distutils.core.Distribution\n- distutils.errors\n\nFor these modules or types, use the standards-defined Python Packaging\nAuthority packages specified:\n\n- distutils.version — use the packaging package\n\nFor these modules or functions, use the standard library module shown:\n\n- distutils.fancy_getopt — use the argparse module\n- distutils.spawn.find_executable — use the shutil.which function\n- distutils.spawn.spawn — use the subprocess.run function\n- distutils.sysconfig — use the sysconfig module\n- distutils.util.get_platform — use the platform module\n\nFor these functions, and any others not mentioned here, you will need to\nreimplement the functionality yourself. The legacy documentation can be\nfound at https://docs.python.org/3.9/distutils/apiref.html\n\n- distutils.dir_util.create_tree\n- distutils.util.change_root\n- distutils.util.strtobool\n\nRejected Ideas\n\nDeprecate but do not delete\n\nThe primary concern with this approach is that distutils most frequently\nbreaks because of platform differences, which means that without\nmaintenance, it will stop working out-of-sync with any Python release.\nThis makes it impossible for libraries to reliably detect when they will\nstop working.\n\nIn contrast, this PEP proposes a concrete date, known well in advance,\nwhen distutils will stop working, and commits to not breaking the API\nbefore that time. This gives maintainers a predictable schedule, ensures\nany breakage occurs at a point where users will already be expecting\nchanged behavior, and provides a reliable detection mechanism\n(specifically, that import distutils raises).\n\nFinally, as long as distutils remains in the standard library in any\nform, it will interfere with third-party packages that provide shims or\nreplacements, including setuptools. Completely removing the package at a\nknown version makes it possible for third-parties to safely use a\nsubstitute.\n\nOnly deprecate the setuptools-like functionality\n\nThis suggestion assumes that there exists a volunteer to maintain\nwhatever is left, which is not true. 
It also implies that anybody knows\nwhich functionality should remain, which as seen in the discussions is\nnot at all clear.\n\nMost helper functions in distutils already have supported (and improved)\nalternatives, often in the standard library, and there is little that\ncan be done to the legacy versions without breaking compatibility. (And\nany break requiring maintainers to update their code is essentially\nequivalent to requiring them to import a different function.)\n\nThe last point from the previous section also applies here.\n\nReferences\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:\n\n[1] distutils - Building and installing Python modules\n(https://docs.python.org/3.9/library/distutils.html)\n\n[2] setuptools - PyPI (https://pypi.org/project/setuptools/)\n\n[3] setuptools Issue #417 - Adopt distutils\n(https://github.com/pypa/setuptools/issues/417)\n\n[4] distutils - Building and installing Python modules\n(https://docs.python.org/3.9/library/distutils.html)\n\n[5] setuptools - PyPI (https://pypi.org/project/setuptools/)\n\n[6] setuptools Issue #417 - Adopt distutils\n(https://github.com/pypa/setuptools/issues/417)\n\n[7] Porting from Distutils\n(https://setuptools.readthedocs.io/en/latest/deprecated/distutils-legacy.html)\n\n[8] Porting from Distutils\n(https://setuptools.readthedocs.io/en/latest/deprecated/distutils-legacy.html)\n\n[9] Packaging (numpy.distutils)\n(https://numpy.org/doc/stable/reference/distutils.html)"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.019206"},"created":{"kind":"timestamp","value":"2020-09-03T00:00:00","string":"2020-09-03T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0632/\",\n \"authors\": [\n \"Steve Dower\"\n ],\n \"pep_number\": \"0632\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":547,"cells":{"id":{"kind":"string","value":"0612"},"text":{"kind":"string","value":"PEP: 612 Title: Parameter Specification Variables Author: Mark Mendoza\n Sponsor: Guido van Rossum \nBDFL-Delegate: Guido van Rossum Discussions-To:\ntyping-sig@python.org Status: Final Type: Standards Track Topic: Typing\nCreated: 18-Dec-2019 Python-Version: 3.10 Post-History: 18-Dec-2019,\n13-Jul-2020\n\ntyping:paramspec and typing.ParamSpec\n\nAbstract\n\nThere currently are two ways to specify the type of a callable, the\nCallable[[int, str], bool] syntax defined in PEP 484, and callback\nprotocols from PEP\n544 <544#callback-protocols>. Neither of these support forwarding the\nparameter types of one callable over to another callable, making it\ndifficult to annotate function decorators. 
This PEP proposes\ntyping.ParamSpec and typing.Concatenate to support expressing these\nkinds of relationships.\n\nMotivation\n\nThe existing standards for annotating higher order functions don’t give\nus the tools to annotate the following common decorator pattern\nsatisfactorily:\n\n from typing import Awaitable, Callable, TypeVar\n\n R = TypeVar(\"R\")\n\n def add_logging(f: Callable[..., R]) -> Callable[..., Awaitable[R]]:\n async def inner(*args: object, **kwargs: object) -> R:\n await log_to_database()\n return f(*args, **kwargs)\n return inner\n\n @add_logging\n def takes_int_str(x: int, y: str) -> int:\n return x + 7\n\n await takes_int_str(1, \"A\")\n await takes_int_str(\"B\", 2) # fails at runtime\n\nadd_logging, a decorator which logs before each entry into the decorated\nfunction, is an instance of the Python idiom of one function passing all\narguments given to it over to another function. This is done through the\ncombination of the *args and **kwargs features in both parameters and in\narguments. When one defines a function (like inner) that takes\n(*args, **kwargs) and goes on to call another function with\n(*args, **kwargs), the wrapping function can only be safely called in\nall of the ways that the wrapped function could be safely called. To\ntype this decorator, we’d like to be able to place a dependency between\nthe parameters of the callable f and the parameters of the returned\nfunction. PEP 484 supports dependencies between single types, as in\ndef append(l: typing.List[T], e: T) -> typing.List[T]: ..., but there is\nno existing way to do so with a complicated entity like the parameters\nof a function.\n\nDue to the limitations of the status quo, the add_logging example will\ntype check but will fail at runtime. inner will pass the string “B” into\ntakes_int_str, which will try to add 7 to it, triggering a type error.\nThis was not caught by the type checker because the decorated\ntakes_int_str was given the type Callable[..., Awaitable[int]] (an\nellipsis in place of parameter types is specified to mean that we do no\nvalidation on arguments).\n\nWithout the ability to define dependencies between the parameters of\ndifferent callable types, there is no way, at present, to make\nadd_logging compatible with all functions, while still preserving the\nenforcement of the parameters of the decorated function.\n\nWith the addition of the ParamSpec variables proposed by this PEP, we\ncan rewrite the previous example in a way that keeps the flexibility of\nthe decorator and the parameter enforcement of the decorated function.\n\n from typing import Awaitable, Callable, ParamSpec, TypeVar\n\n P = ParamSpec(\"P\")\n R = TypeVar(\"R\")\n\n def add_logging(f: Callable[P, R]) -> Callable[P, Awaitable[R]]:\n async def inner(*args: P.args, **kwargs: P.kwargs) -> R:\n await log_to_database()\n return f(*args, **kwargs)\n return inner\n\n @add_logging\n def takes_int_str(x: int, y: str) -> int:\n return x + 7\n\n await takes_int_str(1, \"A\") # Accepted\n await takes_int_str(\"B\", 2) # Correctly rejected by the type checker\n\nAnother common decorator pattern that has previously been impossible to\ntype is the practice of adding or removing arguments from the decorated\nfunction. 
For example:\n\n class Request:\n ...\n\n def with_request(f: Callable[..., R]) -> Callable[..., R]:\n def inner(*args: object, **kwargs: object) -> R:\n return f(Request(), *args, **kwargs)\n return inner\n\n @with_request\n def takes_int_str(request: Request, x: int, y: str) -> int:\n # use request\n return x + 7\n\n takes_int_str(1, \"A\")\n takes_int_str(\"B\", 2) # fails at runtime\n\nWith the addition of the Concatenate operator from this PEP, we can even\ntype this more complex decorator.\n\n from typing import Concatenate\n\n def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]:\n def inner(*args: P.args, **kwargs: P.kwargs) -> R:\n return f(Request(), *args, **kwargs)\n return inner\n\n @with_request\n def takes_int_str(request: Request, x: int, y: str) -> int:\n # use request\n return x + 7\n\n takes_int_str(1, \"A\") # Accepted\n takes_int_str(\"B\", 2) # Correctly rejected by the type checker\n\nSpecification\n\nParamSpec Variables\n\nDeclaration\n\nA parameter specification variable is defined in a similar manner to how\na normal type variable is defined with typing.TypeVar.\n\n from typing import ParamSpec\n P = ParamSpec(\"P\") # Accepted\n P = ParamSpec(\"WrongName\") # Rejected because P =/= WrongName\n\nThe runtime should accept bounds and covariant and contravariant\narguments in the declaration just as typing.TypeVar does, but for now we\nwill defer the standardization of the semantics of those options to a\nlater PEP.\n\nValid use locations\n\nPreviously only a list of parameter arguments ([A, B, C]) or an ellipsis\n(signifying \"undefined parameters\") were acceptable as the first\n\"argument\" to typing.Callable . We now augment that with two new\noptions: a parameter specification variable (Callable[P, int]) or a\nconcatenation on a parameter specification variable\n(Callable[Concatenate[int, P], int]).\n\n callable ::= Callable \"[\" parameters_expression, type_expression \"]\"\n\n parameters_expression ::=\n | \"...\"\n | \"[\" [ type_expression (\",\" type_expression)* ] \"]\"\n | parameter_specification_variable\n | concatenate \"[\"\n type_expression (\",\" type_expression)* \",\"\n parameter_specification_variable\n \"]\"\n\nwhere parameter_specification_variable is a typing.ParamSpec variable,\ndeclared in the manner as defined above, and concatenate is\ntyping.Concatenate.\n\nAs before, parameters_expressions by themselves are not acceptable in\nplaces where a type is expected\n\n def foo(x: P) -> P: ... # Rejected\n def foo(x: Concatenate[int, P]) -> int: ... # Rejected\n def foo(x: typing.List[P]) -> None: ... # Rejected\n def foo(x: Callable[[int, str], P]) -> None: ... # Rejected\n\nUser-Defined Generic Classes\n\nJust as defining a class as inheriting from Generic[T] makes a class\ngeneric for a single parameter (when T is a TypeVar), defining a class\nas inheriting from Generic[P] makes a class generic on\nparameters_expressions (when P is a ParamSpec).\n\n T = TypeVar(\"T\")\n P_2 = ParamSpec(\"P_2\")\n\n class X(Generic[T, P]):\n f: Callable[P, int]\n x: T\n\n def f(x: X[int, P_2]) -> str: ... # Accepted\n def f(x: X[int, Concatenate[int, P_2]]) -> str: ... # Accepted\n def f(x: X[int, [int, bool]]) -> str: ... # Accepted\n def f(x: X[int, ...]) -> str: ... # Accepted\n def f(x: X[int, int]) -> str: ... # Rejected\n\nBy the rules defined above, spelling a concrete instance of a class\ngeneric with respect to only a single ParamSpec would require unsightly\ndouble brackets. 
For aesthetic purposes we allow these to be omitted.\n\n class Z(Generic[P]):\n f: Callable[P, int]\n\n def f(x: Z[[int, str, bool]]) -> str: ... # Accepted\n def f(x: Z[int, str, bool]) -> str: ... # Equivalent\n\n # Both Z[[int, str, bool]] and Z[int, str, bool] express this:\n class Z_instantiated:\n f: Callable[[int, str, bool], int]\n\nSemantics\n\nThe inference rules for the return type of a function invocation whose\nsignature contains a ParamSpec variable are analogous to those around\nevaluating ones with TypeVars.\n\n def changes_return_type_to_str(x: Callable[P, int]) -> Callable[P, str]: ...\n\n def returns_int(a: str, b: bool) -> int: ...\n\n f = changes_return_type_to_str(returns_int) # f should have the type:\n # (a: str, b: bool) -> str\n\n f(\"A\", True) # Accepted\n f(a=\"A\", b=True) # Accepted\n f(\"A\", \"A\") # Rejected\n\n expects_str(f(\"A\", True)) # Accepted\n expects_int(f(\"A\", True)) # Rejected\n\nJust as with traditional TypeVars, a user may include the same ParamSpec\nmultiple times in the arguments of the same function, to indicate a\ndependency between multiple arguments. In these cases a type checker may\nchoose to solve to a common behavioral supertype (i.e. a set of\nparameters for which all of the valid calls are valid in both of the\nsubtypes), but is not obligated to do so.\n\n P = ParamSpec(\"P\")\n\n def foo(x: Callable[P, int], y: Callable[P, int]) -> Callable[P, bool]: ...\n\n def x_y(x: int, y: str) -> int: ...\n def y_x(y: int, x: str) -> int: ...\n\n foo(x_y, x_y) # Should return (x: int, y: str) -> bool\n\n foo(x_y, y_x) # Could return (__a: int, __b: str) -> bool\n # This works because both callables have types that are\n # behavioral subtypes of Callable[[int, str], int]\n\n\n def keyword_only_x(*, x: int) -> int: ...\n def keyword_only_y(*, y: int) -> int: ...\n foo(keyword_only_x, keyword_only_y) # Rejected\n\nThe constructors of user-defined classes generic on ParamSpecs should be\nevaluated in the same way.\n\n U = TypeVar(\"U\")\n\n class Y(Generic[U, P]):\n f: Callable[P, str]\n prop: U\n\n def __init__(self, f: Callable[P, str], prop: U) -> None:\n self.f = f\n self.prop = prop\n\n def a(q: int) -> str: ...\n\n Y(a, 1) # Should resolve to Y[(q: int), int]\n Y(a, 1).f # Should resolve to (q: int) -> str\n\nThe semantics of Concatenate[X, Y, P] are that it represents the\nparameters represented by P with two positional-only parameters\nprepended. 
This means that we can use it to represent higher order\nfunctions that add, remove or transform a finite number of parameters of\na callable.\n\n def bar(x: int, *args: bool) -> int: ...\n\n def add(x: Callable[P, int]) -> Callable[Concatenate[str, P], bool]: ...\n\n add(bar) # Should return (__a: str, x: int, *args: bool) -> bool\n\n def remove(x: Callable[Concatenate[int, P], int]) -> Callable[P, bool]: ...\n\n remove(bar) # Should return (*args: bool) -> bool\n\n def transform(\n x: Callable[Concatenate[int, P], int]\n ) -> Callable[Concatenate[str, P], bool]: ...\n\n transform(bar) # Should return (__a: str, *args: bool) -> bool\n\nThis also means that while any function that returns an R can satisfy\ntyping.Callable[P, R], only functions that can be called positionally in\ntheir first position with a X can satisfy\ntyping.Callable[Concatenate[X, P], R].\n\n def expects_int_first(x: Callable[Concatenate[int, P], int]) -> None: ...\n\n @expects_int_first # Rejected\n def one(x: str) -> int: ...\n\n @expects_int_first # Rejected\n def two(*, x: int) -> int: ...\n\n @expects_int_first # Rejected\n def three(**kwargs: int) -> int: ...\n\n @expects_int_first # Accepted\n def four(*args: int) -> int: ...\n\nThere are still some classes of decorators still not supported with\nthese features:\n\n- those that add/remove/change a variable number of parameters (for\n example, functools.partial will remain untypable even after this\n PEP)\n- those that add/remove/change keyword-only parameters (See\n Concatenating Keyword Parameters for more details).\n\nThe components of a ParamSpec\n\nA ParamSpec captures both positional and keyword accessible parameters,\nbut there unfortunately is no object in the runtime that captures both\nof these together. Instead, we are forced to separate them into *args\nand **kwargs, respectively. This means we need to be able to split apart\na single ParamSpec into these two components, and then bring them back\ntogether into a call. To do this, we introduce P.args to represent the\ntuple of positional arguments in a given call and P.kwargs to represent\nthe corresponding Mapping of keywords to values.\n\nValid use locations\n\nThese \"properties\" can only be used as the annotated types for *args and\n**kwargs, accessed from a ParamSpec already in scope.\n\n def puts_p_into_scope(f: Callable[P, int]) -> None:\n\n def inner(*args: P.args, **kwargs: P.kwargs) -> None: # Accepted\n pass\n\n def mixed_up(*args: P.kwargs, **kwargs: P.args) -> None: # Rejected\n pass\n\n def misplaced(x: P.args) -> None: # Rejected\n pass\n\n def out_of_scope(*args: P.args, **kwargs: P.kwargs) -> None: # Rejected\n pass\n\nFurthermore, because the default kind of parameter in Python ((x: int))\nmay be addressed both positionally and through its name, two valid\ninvocations of a (*args: P.args, **kwargs: P.kwargs) function may give\ndifferent partitions of the same set of parameters. 
Therefore, we need\nto make sure that these special types are only brought into the world\ntogether, and are used together, so that our usage is valid for all\npossible partitions.\n\n def puts_p_into_scope(f: Callable[P, int]) -> None:\n\n stored_args: P.args # Rejected\n\n stored_kwargs: P.kwargs # Rejected\n\n def just_args(*args: P.args) -> None: # Rejected\n pass\n\n def just_kwargs(**kwargs: P.kwargs) -> None: # Rejected\n pass\n\nSemantics\n\nWith those requirements met, we can now take advantage of the unique\nproperties afforded to us by this set up:\n\n- Inside the function, args has the type P.args, not\n Tuple[P.args, ...] as would be with a normal annotation (and\n likewise with the **kwargs)\n - This special case is necessary to encapsulate the heterogeneous\n contents of the args/kwargs of a given call, which cannot be\n expressed by an indefinite tuple/dictionary type.\n- A function of type Callable[P, R] can be called with\n (*args, **kwargs) if and only if args has the type P.args and kwargs\n has the type P.kwargs, and that those types both originated from the\n same function declaration.\n- A function declared as\n def inner(*args: P.args, **kwargs: P.kwargs) -> X has type\n Callable[P, X].\n\nWith these three properties, we now have the ability to fully type check\nparameter preserving decorators.\n\n def decorator(f: Callable[P, int]) -> Callable[P, None]:\n\n def foo(*args: P.args, **kwargs: P.kwargs) -> None:\n\n f(*args, **kwargs) # Accepted, should resolve to int\n\n f(*kwargs, **args) # Rejected\n\n f(1, *args, **kwargs) # Rejected\n\n return foo # Accepted\n\nTo extend this to include Concatenate, we declare the following\nproperties:\n\n- A function of type Callable[Concatenate[A, B, P], R] can only be\n called with (a, b, *args, **kwargs) when args and kwargs are the\n respective components of P, a is of type A and b is of type B.\n- A function declared as\n def inner(a: A, b: B, *args: P.args, **kwargs: P.kwargs) -> R has\n type Callable[Concatenate[A, B, P], R]. Placing keyword-only\n parameters between the *args and **kwargs is forbidden.\n\n def add(f: Callable[P, int]) -> Callable[Concatenate[str, P], None]:\n\n def foo(s: str, *args: P.args, **kwargs: P.kwargs) -> None: # Accepted\n pass\n\n def bar(*args: P.args, s: str, **kwargs: P.kwargs) -> None: # Rejected\n pass\n\n return foo # Accepted\n\n\n def remove(f: Callable[Concatenate[int, P], int]) -> Callable[P, None]:\n\n def foo(*args: P.args, **kwargs: P.kwargs) -> None:\n f(1, *args, **kwargs) # Accepted\n\n f(*args, 1, **kwargs) # Rejected\n\n f(*args, **kwargs) # Rejected\n\n return foo\n\nNote that the names of the parameters preceding the ParamSpec components\nare not mentioned in the resulting Concatenate. This means that these\nparameters can not be addressed via a named argument:\n\n def outer(f: Callable[P, None]) -> Callable[P, None]:\n def foo(x: int, *args: P.args, **kwargs: P.kwargs) -> None:\n f(*args, **kwargs)\n\n def bar(*args: P.args, **kwargs: P.kwargs) -> None:\n foo(1, *args, **kwargs) # Accepted\n foo(x=1, *args, **kwargs) # Rejected\n\n return bar\n\nThis is not an implementation convenience, but a soundness requirement.\nIf we were to allow that second calling style, then the following\nsnippet would be problematic.\n\n @outer\n def problem(*, x: object) -> None:\n pass\n\n problem(x=\"uh-oh\")\n\nInside of bar, we would get\nTypeError: foo() got multiple values for argument 'x'. 
Requiring these\nconcatenated arguments to be addressed positionally avoids this kind of\nproblem, and simplifies the syntax for spelling these types. Note that\nthis also why we have to reject signatures of the form\n(*args: P.args, s: str, **kwargs: P.kwargs) (See Concatenating Keyword\nParameters for more details).\n\nIf one of these prepended positional parameters contains a free\nParamSpec, we consider that variable in scope for the purposes of\nextracting the components of that ParamSpec. That allows us to spell\nthings like this:\n\n def twice(f: Callable[P, int], *args: P.args, **kwargs: P.kwargs) -> int:\n return f(*args, **kwargs) + f(*args, **kwargs)\n\nThe type of twice in the above example is\nCallable[Concatenate[Callable[P, int], P], int], where P is bound by the\nouter Callable. This has the following semantics:\n\n def a_int_b_str(a: int, b: str) -> int:\n pass\n\n twice(a_int_b_str, 1, \"A\") # Accepted\n\n twice(a_int_b_str, b=\"A\", a=1) # Accepted\n\n twice(a_int_b_str, \"A\", 1) # Rejected\n\nBackwards Compatibility\n\nThe only changes necessary to existing features in typing is allowing\nthese ParamSpec and Concatenate objects to be the first parameter to\nCallable and to be a parameter to Generic. Currently Callable expects a\nlist of types there and Generic expects single types, so they are\ncurrently mutually exclusive. Otherwise, existing code that doesn't\nreference the new interfaces will be unaffected.\n\nReference Implementation\n\nThe Pyre type checker supports all of the behavior described above. A\nreference implementation of the runtime components needed for those uses\nis provided in the pyre_extensions module. A reference implementation\nfor CPython can be found here.\n\nRejected Alternatives\n\nUsing List Variadics and Map Variadics\n\nWe considered just trying to make something like this with a callback\nprotocol which was parameterized on a list-type variadic, and a map-type\nvariadic like so:\n\n R = typing.TypeVar(“R”)\n Tpositionals = ...\n Tkeywords = ...\n class BetterCallable(typing.Protocol[Tpositionals, Tkeywords, R]):\n def __call__(*args: Tpositionals, **kwargs: Tkeywords) -> R: ...\n\nHowever, there are some problems with trying to come up with a\nconsistent solution for those type variables for a given callable. This\nproblem comes up with even the simplest of callables:\n\n def simple(x: int) -> None: ...\n simple <: BetterCallable[[int], [], None]\n simple <: BetterCallable[[], {“x”: int}, None]\n BetterCallable[[int], [], None] BetterCallable[[Ts], [Tmap], str]:\n def decorated(*args: Ts, **kwargs: Tmap) -> str:\n x = f(*args, **kwargs)\n return int_to_str(x)\n return decorated\n\n @decorator\n def foo(x: int) -> int:\n return x\n\n reveal_type(foo) # Option A: BetterCallable[[int], {}, str]\n # Option B: BetterCallable[[], {x: int}, str]\n foo(7) # fails under option B\n foo(x=7) # fails under option A\n\nThe core problem here is that, by default, parameters in Python can\neither be called positionally or as a keyword argument. This means we\nreally have three categories (positional-only, positional-or-keyword,\nkeyword-only) we’re trying to jam into two categories. 
This is the same\nproblem that we briefly mentioned when discussing .args and .kwargs.\nFundamentally, in order to capture two categories when there are some\nthings that can be in either category, we need a higher level primitive\n(ParamSpec) to capture all three, and then split them out afterward.\n\nDefining ParametersOf\n\nAnother proposal we considered was defining ParametersOf and ReturnType\noperators which would operate on a domain of a newly defined Function\ntype. Function would be callable with, and only with ParametersOf[F].\nParametersOf and ReturnType would only operate on type variables with\nprecisely this bound. The combination of these three features could\nexpress everything that we can express with ParamSpecs.\n\n F = TypeVar(\"F\", bound=Function)\n\n def no_change(f: F) -> F:\n def inner(\n *args: ParametersOf[F].args,\n **kwargs: ParametersOf[F].kwargs\n ) -> ReturnType[F]:\n return f(*args, **kwargs)\n return inner\n\n def wrapping(f: F) -> Callable[ParametersOf[F], List[ReturnType[F]]]:\n def inner(\n *args: ParametersOf[F].args,\n **kwargs: ParametersOf[F].kwargs\n ) -> List[ReturnType[F]]:\n return [f(*args, **kwargs)]\n return inner\n\n def unwrapping(\n f: Callable[ParametersOf[F], List[R]]\n ) -> Callable[ParametersOf[F], R]:\n def inner(\n *args: ParametersOf[F].args,\n **kwargs: ParametersOf[F].kwargs\n ) -> R:\n return f(*args, **kwargs)[0]\n return inner\n\nWe decided to go with ParamSpecs over this approach for several reasons:\n\n- The footprint of this change would be larger, as we would need two\n new operators, and a new type, while ParamSpec just introduces a new\n variable.\n- Python typing has so far has avoided supporting operators, whether\n user-defined or built-in, in favor of destructuring. Accordingly,\n ParamSpec based signatures look much more like existing Python.\n- The lack of user-defined operators makes common patterns hard to\n spell. unwrapping is odd to read because F is not actually referring\n to any callable. It’s just being used as a container for the\n parameters we wish to propagate. It would read better if we could\n define an operator RemoveList[List[X]] = X and then unwrapping could\n take F and return\n Callable[ParametersOf[F], RemoveList[ReturnType[F]]]. Without that,\n we unfortunately get into a situation where we have to use a\n Function-variable as an improvised ParamSpec, in that we never\n actually bind the return type.\n\nIn summary, between these two equivalently powerful syntaxes, ParamSpec\nfits much more naturally into the status quo.\n\nConcatenating Keyword Parameters\n\nIn principle the idea of concatenation as a means to modify a finite\nnumber of positional parameters could be expanded to include keyword\nparameters.\n\n def add_n(f: Callable[P, R]) -> Callable[Concatenate[(\"n\", int), P], R]:\n def inner(*args: P.args, n: int, **kwargs: P.kwargs) -> R:\n # use n\n return f(*args, **kwargs)\n return inner\n\nHowever, the key distinction is that while prepending positional-only\nparameters to a valid callable type always yields another valid callable\ntype, the same cannot be said for adding keyword-only parameters. As\nalluded to above , the issue is name collisions. 
The parameters\nConcatenate[(\"n\", int), P] are only valid when P itself does not already\nhave a parameter named n.\n\n def innocent_wrapper(f: Callable[P, R]) -> Callable[P, R]:\n def inner(*args: P.args, **kwargs: P.kwargs) -> R:\n added = add_n(f)\n return added(*args, n=1, **kwargs)\n return inner\n\n @innocent_wrapper\n def problem(n: int) -> None:\n pass\n\nCalling problem(2) works fine, but calling problem(n=2) leads to a\nTypeError: problem() got multiple values for argument 'n' from the call\nto added inside of innocent_wrapper.\n\nThis kind of situation could be avoided, and this kind of decorator\ncould be typed if we could reify the constraint that a set of parameters\nnot contain a certain name, with something like:\n\n P_without_n = ParamSpec(\"P_without_n\", banned_names=[\"n\"])\n\n def add_n(\n f: Callable[P_without_n, R]\n ) -> Callable[Concatenate[(\"n\", int), P_without_n], R]: ...\n\nThe call to add_n inside of innocent_wrapper could then be rejected\nsince the callable was not guaranteed not to already have a parameter\nnamed n.\n\nHowever, enforcing these constraints would require enough additional\nimplementation work that we judged this extension to be out of scope of\nthis PEP. Fortunately the design of ParamSpecs are such that we can\nreturn to this idea later if there is sufficient demand.\n\nNaming this a ParameterSpecification\n\nWe decided that ParameterSpecification was a little too long-winded for\nuse here, and that this style of abbreviated name made it look more like\nTypeVar.\n\nNaming this an ArgSpec\n\nWe think that calling this a ParamSpec is more correct than referring to\nit as an ArgSpec, since callables have parameters, which are distinct\nfrom the arguments which are passed to them in a given call site. 
A\ngiven binding for a ParamSpec is a set of function parameters, not a\ncall-site’s arguments.\n\nAcknowledgements\n\nThanks to all of the members of the Pyre team for their comments on\nearly drafts of this PEP, and for their help with the reference\nimplementation.\n\nThanks are also due to the whole Python typing community for their early\nfeedback on this idea at a Python typing meetup, leading directly to the\nmuch more compact .args/.kwargs syntax.\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.051102"},"created":{"kind":"timestamp","value":"2019-12-18T00:00:00","string":"2019-12-18T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0612/\",\n \"authors\": [\n \"Mark Mendoza\"\n ],\n \"pep_number\": \"0612\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":548,"cells":{"id":{"kind":"string","value":"0515"},"text":{"kind":"string","value":"PEP: 515 Title: Underscores in Numeric Literals Version: $Revision$\nLast-Modified: $Date$ Author: Georg Brandl, Serhiy Storchaka Status:\nFinal Type: Standards Track Content-Type: text/x-rst Created:\n10-Feb-2016 Python-Version: 3.6 Post-History: 10-Feb-2016, 11-Feb-2016\n\nAbstract and Rationale\n\nThis PEP proposes to extend Python's syntax and number-from-string\nconstructors so that underscores can be used as visual separators for\ndigit grouping purposes in integral, floating-point and complex number\nliterals.\n\nThis is a common feature of other modern languages, and can aid\nreadability of long literals, or literals whose value should clearly\nseparate into parts, such as bytes or words in hexadecimal notation.\n\nExamples:\n\n # grouping decimal numbers by thousands\n amount = 10_000_000.0\n\n # grouping hexadecimal addresses by words\n addr = 0xCAFE_F00D\n\n # grouping bits into nibbles in a binary literal\n flags = 0b_0011_1111_0100_1110\n\n # same, for string conversions\n flags = int('0b_1111_0000', 2)\n\nSpecification\n\nThe current proposal is to allow one underscore between digits, and\nafter base specifiers in numeric literals. 
The underscores have no\nsemantic meaning, and literals are parsed as if the underscores were\nabsent.\n\nLiteral Grammar\n\nThe production list for integer literals would therefore look like this:\n\n integer: decinteger | bininteger | octinteger | hexinteger\n decinteger: nonzerodigit ([\"_\"] digit)* | \"0\" ([\"_\"] \"0\")*\n bininteger: \"0\" (\"b\" | \"B\") ([\"_\"] bindigit)+\n octinteger: \"0\" (\"o\" | \"O\") ([\"_\"] octdigit)+\n hexinteger: \"0\" (\"x\" | \"X\") ([\"_\"] hexdigit)+\n nonzerodigit: \"1\"...\"9\"\n digit: \"0\"...\"9\"\n bindigit: \"0\" | \"1\"\n octdigit: \"0\"...\"7\"\n hexdigit: digit | \"a\"...\"f\" | \"A\"...\"F\"\n\nFor floating-point and complex literals:\n\n floatnumber: pointfloat | exponentfloat\n pointfloat: [digitpart] fraction | digitpart \".\"\n exponentfloat: (digitpart | pointfloat) exponent\n digitpart: digit ([\"_\"] digit)*\n fraction: \".\" digitpart\n exponent: (\"e\" | \"E\") [\"+\" | \"-\"] digitpart\n imagnumber: (floatnumber | digitpart) (\"j\" | \"J\")\n\nConstructors\n\nFollowing the same rules for placement, underscores will be allowed in\nthe following constructors:\n\n- int() (with any base)\n- float()\n- complex()\n- Decimal()\n\nFurther changes\n\nThe new-style number-to-string formatting language will be extended to\nallow _ as a thousands separator, where currently only , is supported.\nThis can be used to easily generate code with more readable literals.[1]\n\nThe syntax would be the same as for the comma, e.g. {:10_} for a width\nof 10 with _ separator.\n\nFor the b, x and o format specifiers, _ will be allowed and group by 4\ndigits.\n\nPrior Art\n\nThose languages that do allow underscore grouping implement a large\nvariety of rules for allowed placement of underscores. In cases where\nthe language spec contradicts the actual behavior, the actual behavior\nis listed. (\"single\" or \"multiple\" refer to allowing runs of consecutive\nunderscores.)\n\n- Ada: single, only between digits[2]\n- C# (open proposal for 7.0): multiple, only between digits[3]\n- C++14: single, between digits (different separator chosen)[4]\n- D: multiple, anywhere, including trailing[5]\n- Java: multiple, only between digits[6]\n- Julia: single, only between digits (but not in float exponent parts)\n [7]\n- Perl 5: multiple, basically anywhere, although docs say it's\n restricted to one underscore between digits[8]\n- Ruby: single, only between digits (although docs say \"anywhere\") [9]\n- Rust: multiple, anywhere, except for between exponent \"e\" and digits\n [10]\n- Swift: multiple, between digits and trailing (although textual\n description says only \"between digits\")[11]\n\nAlternative Syntax\n\nUnderscore Placement Rules\n\nInstead of the relatively strict rule specified above, the use of\nunderscores could be less limited. 
As seen in other languages, common\nrules include:\n\n- Only one consecutive underscore allowed, and only between digits.\n- Multiple consecutive underscores allowed, but only between digits.\n- Multiple consecutive underscores allowed, in most positions except\n for the start of the literal, or special positions like after a\n decimal point.\n\nThe syntax in this PEP has ultimately been selected because it covers\nthe common use cases, and does not allow for syntax that would have to\nbe discouraged in style guides anyway.\n\nA less common rule would be to allow underscores only every N digits\n(where N could be 3 for decimal literals, or 4 for hexadecimal ones).\nThis is unnecessarily restrictive, especially considering the separator\nplacement is different in different cultures.\n\nDifferent Separators\n\nA proposed alternate syntax was to use whitespace for grouping. Although\nstrings are a precedent for combining adjoining literals, the behavior\ncan lead to unexpected effects which are not possible with underscores.\nAlso, no other language is known to use this rule, except for languages\nthat generally disregard any whitespace.\n\nC++14 introduces apostrophes for grouping (because underscores introduce\nambiguity with user-defined literals), which is not considered because\nof the use in Python's string literals.[12]\n\nImplementation\n\nA preliminary patch that implements the specification given above has\nbeen posted to the issue tracker.[13]\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:\n\n[1] https://mail.python.org/pipermail/python-dev/2016-February/143283.html\n\n[2] http://archive.adaic.com/standards/83lrm/html/lrm-02-04.html#2.4\n\n[3] https://github.com/dotnet/roslyn/issues/216\n\n[4] http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3499.html\n\n[5] https://dlang.org/spec/lex.html#integerliteral\n\n[6] https://docs.oracle.com/javase/7/docs/technotes/guides/language/underscores-literals.html\n\n[7] https://web.archive.org/web/20160223175334/http://docs.julialang.org/en/release-0.4/manual/integers-and-floating-point-numbers/\n\n[8] https://perldoc.perl.org/perldata#Scalar-value-constructors\n\n[9] https://ruby-doc.org/core-2.3.0/doc/syntax/literals_rdoc.html#label-Numbers\n\n[10] https://web.archive.org/web/20160304121349/http://doc.rust-lang.org/reference.html#integer-literals\n\n[11] https://docs.swift.org/swift-book/ReferenceManual/LexicalStructure.html\n\n[12] http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3499.html\n\n[13] http://bugs.python.org/issue26331"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.064903"},"created":{"kind":"timestamp","value":"2016-02-10T00:00:00","string":"2016-02-10T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0515/\",\n \"authors\": [\n \"Georg Brandl\",\n \"Serhiy Storchaka\"\n ],\n \"pep_number\": \"0515\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":549,"cells":{"id":{"kind":"string","value":"0328"},"text":{"kind":"string","value":"PEP: 328 Title: Imports: Multi-Line and Absolute/Relative Version:\n$Revision$ Last-Modified: $Date$ Author: Aahz \nStatus: Final Type: Standards Track Content-Type: text/x-rst Created:\n21-Dec-2003 Python-Version: 2.4, 2.5, 2.6 Post-History: 08-Mar-2004\n\nAbstract\n\nThe import statement 
has two problems:\n\n- Long import statements can be difficult to write, requiring various\n contortions to fit Pythonic style guidelines.\n- Imports can be ambiguous in the face of packages; within a package,\n it's not clear whether import foo refers to a module within the\n package or some module outside the package. (More precisely, a local\n module or package can shadow another hanging directly off sys.path.)\n\nFor the first problem, it is proposed that parentheses be permitted to\nenclose multiple names, thus allowing Python's standard mechanisms for\nmulti-line values to apply. For the second problem, it is proposed that\nall import statements be absolute by default (searching sys.path only)\nwith special syntax (leading dots) for accessing package-relative\nimports.\n\nTimeline\n\nIn Python 2.5, you must enable the new absolute import behavior with :\n\n from __future__ import absolute_import\n\nYou may use relative imports freely. In Python 2.6, any import statement\nthat results in an intra-package import will raise DeprecationWarning\n(this also applies to from <> import that fails to use the relative\nimport syntax).\n\nRationale for Parentheses\n\nCurrently, if you want to import a lot of names from a module or\npackage, you have to choose one of several unpalatable options:\n\n- Write a long line with backslash continuations:\n\n from Tkinter import Tk, Frame, Button, Entry, Canvas, Text, \\\n LEFT, DISABLED, NORMAL, RIDGE, END\n\n- Write multiple import statements:\n\n from Tkinter import Tk, Frame, Button, Entry, Canvas, Text\n from Tkinter import LEFT, DISABLED, NORMAL, RIDGE, END\n\n(import * is not an option ;-)\n\nInstead, it should be possible to use Python's standard grouping\nmechanism (parentheses) to write the import statement:\n\n from Tkinter import (Tk, Frame, Button, Entry, Canvas, Text,\n LEFT, DISABLED, NORMAL, RIDGE, END)\n\nThis part of the proposal had BDFL approval from the beginning.\n\nParentheses support was added to Python 2.4.\n\nRationale for Absolute Imports\n\nIn Python 2.4 and earlier, if you're reading a module located inside a\npackage, it is not clear whether :\n\n import foo\n\nrefers to a top-level module or to another module inside the package. As\nPython's library expands, more and more existing package internal\nmodules suddenly shadow standard library modules by accident. It's a\nparticularly difficult problem inside packages because there's no way to\nspecify which module is meant. To resolve the ambiguity, it is proposed\nthat foo will always be a module or package reachable from sys.path.\nThis is called an absolute import.\n\nThe python-dev community chose absolute imports as the default because\nthey're the more common use case and because absolute imports can\nprovide all the functionality of relative (intra-package) imports --\nalbeit at the cost of difficulty when renaming package pieces higher up\nin the hierarchy or when moving one package inside another.\n\nBecause this represents a change in semantics, absolute imports will be\noptional in Python 2.5 and 2.6 through the use of :\n\n from __future__ import absolute_import\n\nThis part of the proposal had BDFL approval from the beginning.\n\nRationale for Relative Imports\n\nWith the shift to absolute imports, the question arose whether relative\nimports should be allowed at all. Several use cases were presented, the\nmost important of which is being able to rearrange the structure of\nlarge packages without having to edit sub-packages. 
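For illustration (the package names here are hypothetical, not taken
from the discussion), consider:

    # Before the rename:              After the rename:
    #
    #   app/                            application/
    #       __init__.py                     __init__.py
    #       utils.py                        utils.py
    #       handlers.py                     handlers.py

    # In handlers.py, the relative form
    from . import utils
    # keeps working after the package is renamed or moved inside
    # another package, whereas the absolute form 'import app.utils'
    # would have to be edited in every module that refers to it.
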
In addition, a\nmodule inside a package can't easily import itself without relative\nimports.\n\nGuido approved of the idea of relative imports, but there has been a lot\nof disagreement on the spelling (syntax). There does seem to be\nagreement that relative imports will require listing specific names to\nimport (that is, import foo as a bare term will always be an absolute\nimport).\n\nHere are the contenders:\n\n- One from Guido:\n\n from .foo import bar\n\n and :\n\n from ...foo import bar\n\n These two forms have a couple of different suggested semantics. One\n semantic is to make each dot represent one level. There have been\n many complaints about the difficulty of counting dots. Another\n option is to only allow one level of relative import. That misses a\n lot of functionality, and people still complained about missing the\n dot in the one-dot form. The final option is to define an algorithm\n for finding relative modules and packages; the objection here is\n \"Explicit is better than implicit\". (The algorithm proposed is\n \"search up from current package directory until the ultimate package\n parent gets hit\".)\n\n Some people have suggested other punctuation as the separator, such\n as \"-\" or \"^\".\n\n Some people have suggested using \"*\":\n\n from *.foo import bar\n\n- The next set of options is conflated from several posters:\n\n from __pkg__.__pkg__ import\n\n and :\n\n from .__parent__.__parent__ import\n\n Many people (Guido included) think these look ugly, but they are\n clear and explicit. Overall, more people prefer __pkg__ as the\n shorter option.\n\n- One suggestion was to allow only sibling references. In other words,\n you would not be able to use relative imports to refer to modules\n higher in the package tree. You would then be able to do either :\n\n from .spam import eggs\n\n or :\n\n import .spam.eggs\n\n- Some people favor allowing indexed parents:\n\n from -2.spam import eggs\n\n In this scenario, importing from the current directory would be a\n simple :\n\n from .spam import eggs\n\n- Finally, some people dislike the way you have to change import to\n from ... import when you want to dig inside a package. They suggest\n completely rewriting the import syntax:\n\n from MODULE import NAMES as RENAME searching HOW\n\n or :\n\n import NAMES as RENAME from MODULE searching HOW\n [from NAMES] [in WHERE] import ...\n\n However, this most likely could not be implemented for Python 2.5\n (too big a change), and allowing relative imports is sufficiently\n critical that we need something now (given that the standard import\n will change to absolute import). More than that, this proposed\n syntax has several open questions:\n\n - What is the precise proposed syntax? (Which clauses are optional\n under which circumstances?)\n\n - How strongly does the searching clause bind? In other words, do\n you write:\n\n import foo as bar searching XXX, spam as ham searching XXX\n\n or:\n\n import foo as bar, spam as ham searching XXX\n\nGuido's Decision\n\nGuido has Pronounced[1] that relative imports will use leading dots. A\nsingle leading dot indicates a relative import, starting with the\ncurrent package. 
Two or more leading dots give a relative import to the\nparent(s) of the current package, one level per dot after the first.\nHere's a sample package layout:\n\n package/\n __init__.py\n subpackage1/\n __init__.py\n moduleX.py\n moduleY.py\n subpackage2/\n __init__.py\n moduleZ.py\n moduleA.py\n\nAssuming that the current file is either moduleX.py or\nsubpackage1/__init__.py, following are correct usages of the new syntax:\n\n from .moduleY import spam\n from .moduleY import spam as ham\n from . import moduleY\n from ..subpackage1 import moduleY\n from ..subpackage2.moduleZ import eggs\n from ..moduleA import foo\n from ...package import bar\n from ...sys import path\n\nNote that while that last case is legal, it is certainly discouraged\n(\"insane\" was the word Guido used).\n\nRelative imports must always use from <> import; import <> is always\nabsolute. Of course, absolute imports can use from <> import by omitting\nthe leading dots. The reason import .foo is prohibited is because after\n:\n\n import XXX.YYY.ZZZ\n\nthen :\n\n XXX.YYY.ZZZ\n\nis usable in an expression. But :\n\n .moduleY\n\nis not usable in an expression.\n\nRelative Imports and __name__\n\nRelative imports use a module's __name__ attribute to determine that\nmodule's position in the package hierarchy. If the module's name does\nnot contain any package information (e.g. it is set to '__main__') then\nrelative imports are resolved as if the module were a top level module,\nregardless of where the module is actually located on the file system.\n\nRelative Imports and Indirection Entries in sys.modules\n\nWhen packages were introduced, the concept of an indirection entry in\nsys.modules came into existence[2]. When an entry in sys.modules for a\nmodule within a package had a value of None, it represented that the\nmodule actually referenced the top-level module. For instance,\n'Sound.Effects.string' might have a value of None in sys.modules. That\nmeant any import that resolved to that name actually was to import the\ntop-level 'string' module.\n\nThis introduced an optimization for when a relative import was meant to\nresolve to an absolute import. But since this PEP makes a very clear\ndelineation between absolute and relative imports, this optimization is\nno longer needed. 
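For illustration, the historical mechanism worked roughly as follows.
This is only a sketch of the behavior described above, reusing the same
example name; it is not code to be relied upon:

    import sys

    # While executing 'import string' inside the package Sound.Effects,
    # older versions of Python first looked for a package-relative
    # Sound.Effects.string. When none existed, an indirection entry
    # was recorded:
    sys.modules['Sound.Effects.string'] = None
    # On subsequent imports, the None value told the import machinery
    # that 'string' in this package really meant the top-level module,
    # skipping the relative search.
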
When absolute/relative imports become the only import
semantics available then indirection entries in sys.modules will no
longer be supported.

References

For more background, see the following python-dev threads:

- Re: Christmas Wishlist
- Re: Python-Dev Digest, Vol 5, Issue 57
- Relative import
- Another Strategy for Relative Import

Copyright

This document has been placed in the public domain.

[1] https://mail.python.org/pipermail/python-dev/2004-March/043739.html

[2] https://www.python.org/doc/essays/packages/

PEP: 281 Title: Loop Counter Iteration with range and xrange Author:
Magnus Lie Hetland Status: Rejected Type: Standards
Track Content-Type: text/x-rst Created: 11-Feb-2002 Python-Version: 2.3
Post-History:

Abstract

This PEP describes yet another way of exposing the loop counter in
for-loops. It basically proposes that the functionality of the function
indices() from PEP 212 be included in the existing functions range() and
xrange().

Pronouncement

In commenting on PEP 279's enumerate() function, this PEP's author
offered, "I'm quite happy to have it make PEP 281 obsolete."
Subsequently, PEP 279 was accepted into Python 2.3.

On 17 June 2005, the BDFL concurred with it being obsolete and hereby
rejected the PEP. For the record, he found some of the examples to be
somewhat jarring in appearance:

    >>> range(range(5), range(10), range(2))
    [5, 7, 9]

Motivation

It is often desirable to loop over the indices of a sequence. PEP 212
describes several ways of doing this, including adding a built-in
function called indices, conceptually defined as:

    def indices(sequence):
        return range(len(sequence))

On the assumption that adding functionality to an existing built-in
function may be less intrusive than adding a new built-in function, this
PEP proposes adding this functionality to the existing functions range()
and xrange().

Specification

It is proposed that all three arguments to the built-in functions
range() and xrange() are allowed to be objects with a length (i.e.
objects implementing the __len__ method). If an argument cannot be
interpreted as an integer (i.e. it has no __int__ method), its length
will be used instead.

Examples:

    >>> range(range(10))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> range(range(5), range(10))
    [5, 6, 7, 8, 9]
    >>> range(range(5), range(10), range(2))
    [5, 7, 9]
    >>> list(xrange(range(10)))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> list(xrange(xrange(10)))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    # Number the lines of a file:
    lines = file.readlines()
    for num in range(lines):
        print num, lines[num]

Alternatives

A natural alternative to the above specification is allowing xrange() to
access its arguments in a lazy manner. Thus, instead of using their
length explicitly, xrange can return one index for each element of the
stop argument until the end is reached. A similar lazy treatment makes
little sense for the start and step arguments since their length must be
calculated before iteration can begin. (Actually, the length of the step
argument isn't needed until the second element is returned.)

A pseudo-implementation (using only the stop argument, and assuming that
it is iterable) is:

    def xrange(stop):
        i = 0
        for x in stop:
            yield i
            i += 1

Testing whether to use int() or lazy iteration could be done by checking
for an __iter__ attribute. (This example assumes the presence of
generators, but could easily have been implemented as a plain iterator
object.)

It may be questionable whether this feature is truly useful, since one
would not be able to access the elements of the iterable object inside
the for loop through indexing.

Example:

    # Printing the numbers of the lines of a file:
    for num in range(file):
        print num  # The line itself is not accessible

A more controversial alternative (to deal with this) would be to let
range() behave like the function irange() of PEP 212 when supplied with
a sequence.

Example:

    >>> range(5)
    [0, 1, 2, 3, 4]
    >>> range('abcde')
    [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd'), (4, 'e')]

Backwards Compatibility

The proposal could cause backwards incompatibilities if arguments are
used which implement both __int__ and __len__ (or __iter__ in the case
of lazy iteration with xrange). The author does not believe that this is
a significant problem.

Copyright

This document has been placed in the public domain.

PEP: 600 Title: Future 'manylinux' Platform Tags for Portable Linux
Built Distributions Version: $Revision$ Last-Modified: $Date$ Author:
Nathaniel J. Smith, Thomas Kluyver Sponsor: Paul Moore
BDFL-Delegate: Paul Moore Discussions-To:
https://discuss.python.org/t/the-next-manylinux-specification/1043
Status: Final Type: Standards Track Topic: Packaging Content-Type:
text/x-rst Created: 03-May-2019 Post-History: 03-May-2019 Replaces: 513,
571, 599 Resolution:
https://discuss.python.org/t/pep-600-future-manylinux-platform-tags-for-portable-linux-built-distributions/2414/27

Abstract

This PEP proposes a scheme for new 'manylinux' wheel tags to be defined
without requiring a PEP for every specific tag, similar to how Windows
and macOS tags already work. This will allow package maintainers to take
advantage of new tags more quickly, while making better use of limited
volunteer time.

Non-goals include: handling non-glibc-based platforms; integrating with
external package managers or handling external dependencies such as
CUDA; making manylinux tags more sophisticated than their Windows/macOS
equivalents; doing anything besides taking our existing tried-and-tested
approach and streamlining it.
These are important issues and other PEPs\nmay address them in the future, but for this PEP they're out of scope.\n\nRationale\n\nPython users appreciate it when PyPI has pre-compiled packages for their\nplatform, because it makes installation fast and simple. But\ndistributing pre-compiled binaries on Linux is challenging because of\nthe diversity of Linux-based platforms. For example, Debian, Android,\nand Alpine all use the Linux kernel, but with radically different\nuserspace libraries, which makes it difficult or impossible to create a\nsingle wheel that works on all three. This complexity has caused many\nprevious discussions of Linux wheels to stall out.\n\nThe \"manylinux\" project succeeded by adopting a strategy of ruthless\npragmatism. We chose a large but tractable set of Linux platforms –\nspecifically, mainstream glibc-based distributions like Debian,\nOpenSuSE, Ubuntu, RHEL, etc. – and then we did whatever it takes to make\nwheels that work across all these platforms.\n\nThis approach requires many compromises. Manylinux wheels can only rely\non external libraries that maintain a consistent ABI and are universally\navailable across all these distributions, which in practice restricts\nthem to a small set of core libraries like glibc and a few others.\nWheels have to be built on carefully-chosen platforms of the oldest\npossible vintage, using a Python that is itself built in a\ncarefully-chosen configuration. Other shared library dependencies have\nto be bundled into the wheel, which requires a complex process to avoid\ncollisions between unrelated wheels. And finally, the details of these\nrequirements change over time, as new distro versions are released, and\nold ones fall out of use.\n\nIt turns out that these requirements are not too onerous: they're\nessentially equivalent to what you have to do to ship Windows or macOS\nwheels, and the manylinux approach has achieved substantial uptake among\nboth package maintainers and end-users. But any manylinux PEP needs some\nway to address these complexities.\n\nIn previous manylinux PEPs (PEP 513, PEP 571, PEP 599), we've done this\nby attempting to write down in the PEP the exact set of libraries,\nsymbol versions, Python configuration, etc. that we believed would lead\nto wheels that work on all mainstream glibc-based Linux systems. But\nthis created several problems:\n\nFirst, PEPs are generally supposed to be normative references: if\nsoftware doesn't match the PEP, then we fix the software. But in this\ncase, the PEPs are attempting to describe Linux distributions, which are\na moving target, and do not consider our PEPs to constrain their\nbehavior. This means that we've been taking on an unbounded commitment\nto keep updating every manylinux PEP whenever the Linux distro landscape\nchanges. This is a substantial commitment for unfunded volunteers to\ntake on, and it's not clear that this work produces value for our users.\n\nAnd second, every time we move manylinux forward to a newer range of\nsupported platforms, or add support for a new architecture, we have to\ngo through a fairly elaborate process: writing a new PEP, updating the\nPyPI and pip codebases to recognize the new tag, waiting for the new pip\nto percolate to users, etc. None of this happens on Windows/macOS; it's\nonly a tax on Linux maintainers. This slows deployment of new manylinux\nversions, and consumes part of our community's limited PEP review\nbandwidth, thus slowing progress of the Python packaging ecosystem as a\nwhole. 
This is especially problematic for less-popular architectures,\nwho have less volunteer resources to overcome these barriers.\n\nHow can we fix it?\n\nA manylinux PEP has to address three main audiences:\n\n- Package installers, like pip, need to be able to determine which\n wheel tags are compatible with the system they find themselves\n running on. This requires some automated process to introspect the\n system and match it up with wheel tags.\n- Package indexes, like PyPI, need to be able to validate which wheel\n tags are valid. Generally, this just requires something like a list\n of valid tags, or regex they match, with no need to know anything\n about the actual semantics for individual tags. (But see the\n discussion of upload verification below.)\n- Package maintainers need to be able to build wheels that meet the\n requirements for a given wheel tag.\n\nHere's the key insight behind this new PEP: it's crucial that different\npackage installers and package indexes all agree on which manylinux tags\nare valid and which systems they install on, so we need a PEP to specify\nthese – but, these are straightforward, and don't really change between\nmanylinux versions. The complicated part that keeps changing is the\nprocess of actually building the wheels – but, if there are multiple\ncompeting build environments, it doesn't matter whether they use exactly\nthe same rules as each other, as long as they all produce wheels that\nwork on end-user systems. Therefore, we don't need an interoperability\nstandard for building wheels, so we don't need to write the details into\na PEP.\n\nTo further convince ourselves that this approach will work, let's look\nagain at how we handle wheels on Windows and macOS: the PEPs describe\nwhich tags are valid, and which systems they're supposed to work on, but\nnot how to actually build wheels for those platforms. And in practice,\nif you want to distribute Windows or macOS wheels, you might have to\njump through some complicated and poorly documented hoops in order to\nbundle dependencies, target the right range of OS versions, etc. But the\nsystem works, and the way to improve it is to write better docs and\nbuild better tooling; no-one thinks that the way to make Windows wheels\nwork better is to publish a PEP describing which symbols we think\nMicrosoft should be including in their libraries and how their linker\nought to work. This PEP extends that philosophy to manylinux as well.\n\nSpecification\n\nCore definition\n\nTags using the new scheme will look like:\n\n manylinux_2_17_x86_64\n\nOr more generally:\n\n manylinux_${GLIBCMAJOR}_${GLIBCMINOR}_${ARCH}\n\nThis tag is a promise: the wheel's creator promises that the wheel will\nwork on any mainstream Linux distro that uses glibc version\n${GLIBCMAJOR}.${GLIBCMINOR} or later, and where the ${ARCH} matches the\nreturn value from distutils.util.get_platform(). (For more detail about\narchitecture tags, see PEP 425.)\n\nIf a user installs this wheel into an environment that matches these\nrequirements and it doesn't work, then that wheel does not comply with\nthis specification. This should be considered a bug in the wheel, and\nit's the wheel creator's responsibility to look for a fix (possibly with\nthe help of the broader community).\n\nThe word \"mainstream\" is intentionally somewhat vague, and should be\ninterpreted expansively. The goal is to rule out weird homebrew Linux\nsystems; generally any distro you've actually heard of should be\nconsidered \"mainstream\". 
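As a practical aside, the glibc half of this check is easy for tools to
perform at runtime by asking the already-loaded C library for its own
version. The following sketch is illustrative only, not part of the
specification; it is one possible way to implement the
get_system_glibc_version() helper used in the sample code under
"Package installers" below:

    import ctypes
    import re

    def get_system_glibc_version():
        """Return (major, minor) of the glibc this process is linked
        against, or None if this is not a glibc-based system."""
        try:
            # CDLL(None) exposes the symbols already loaded into this
            # process; musl and other libcs lack this function.
            libc = ctypes.CDLL(None)
            gnu_get_libc_version = libc.gnu_get_libc_version
        except (OSError, AttributeError):
            return None
        gnu_get_libc_version.restype = ctypes.c_char_p
        version = gnu_get_libc_version().decode("ascii")  # e.g. "2.17"
        match = re.match(r"(\d+)\.(\d+)", version)
        if match is None:
            return None
        return int(match.group(1)), int(match.group(2))

    # On a CentOS 7 system this returns (2, 17), so manylinux_2_17
    # wheels (and older manylinux wheels) are candidates there.
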
We also provide a way for maintainers of\n\"weird\" distros to manually override this check, though based on\nexperience with previous manylinux PEPs, we don't expect this feature to\nsee much use.\n\nAnd finally, compliant wheels are required to \"play well with others\",\ni.e., installing a manylinux wheel must not cause other unrelated\npackages to break.\n\nAny method of producing wheels which meets these criteria is acceptable.\nHowever, in practice we expect that the auditwheel project will maintain\nan up-to-date set of tools and build images for producing manylinux\nwheels, as well as documentation about how they work and how to use\nthem, and that most maintainers will want to use those. For the latest\ninformation on building manylinux wheels, including recommendations\nabout which build images to use, see https://packaging.python.org.\n\nSince these requirements are fairly high-level, here are some examples\nof how they play out in specific situations:\n\nExample: if a wheel is tagged as manylinux_2_17_x86_64, but it uses\nsymbols that were only added in glibc 2.18, then that wheel won't work\non systems with glibc 2.17. Therefore, we can conclude that this wheel\nis in violation of this specification.\n\nExample: Until ~2017, all major Linux distros included libncursesw.so.5\nas part of their default install. Until that date, a wheel that linked\nto libncursesw.so.5 was compliant with this specification. Then, distros\nstarted switching to ncurses 6, which has a different name and\nincompatible ABI, and stopped installing libncursesw.so.5 by default. So\nafter that date, a wheel that links to libncursesw.so.5 was no longer\ncompliant with this specification.\n\nExample: The Linux ELF linker places all shared library SONAMEs into a\nsingle process-global namespace. If independent wheels used the same\nSONAME for their bundled libraries, they might end up colliding and\nusing the wrong library version, which would violate the \"play well with\nothers\" rule. Therefore, this specification requires that wheels use\nglobally-unique names for all bundled libraries. (Auditwheel currently\naccomplishes this by renaming all bundled libraries to include a\nglobally-unique hash.)\n\nExample: we've observed certain wheels using C++ in ways that interfere\nwith other packages via an unclear mechanism. This is also a violation\nof the \"play well with others\" rule, so those wheels aren't compliant\nwith this specification.\n\nExample: The imaginary architecture LEG v7 has both big-endian and\nlittle-endian variants. Big-endian binaries require a big-endian system,\nand little-endian binaries require a little-endian system. But\nunfortunately, it's discovered that due to a bug in PEP 425, both\nvariants use the same architecture tag, legv7. This makes it impossible\nto create a compliant manylinux_2_17_legv7 wheel: no matter what we do,\nit will crash on some user's systems. So, we write a new PEP defining\narchitecture tags legv7le and legv7be; now we can ship manylinux LEG v7\nwheels.\n\nExample: There's also a LEG v8. It also has big-endian and little-endian\nvariants. 
But fortunately, it turns out that PEP 425 already does the
right thing for LEG v8, so LEG v8 enthusiasts can start shipping
manylinux_2_17_legv8le and manylinux_2_17_legv8be wheels immediately
once this PEP is implemented, even though the authors of this PEP don't
know anything at all about LEG v8.

Legacy manylinux tags

The existing manylinux tags are redefined as aliases for new-style tags:

- manylinux1_x86_64 is now an alias for manylinux_2_5_x86_64
- manylinux1_i686 is now an alias for manylinux_2_5_i686
- manylinux2010_x86_64 is now an alias for manylinux_2_12_x86_64
- manylinux2010_i686 is now an alias for manylinux_2_12_i686
- manylinux2014_x86_64 is now an alias for manylinux_2_17_x86_64
- manylinux2014_i686 is now an alias for manylinux_2_17_i686
- manylinux2014_aarch64 is now an alias for manylinux_2_17_aarch64
- manylinux2014_armv7l is now an alias for manylinux_2_17_armv7l
- manylinux2014_ppc64 is now an alias for manylinux_2_17_ppc64
- manylinux2014_ppc64le is now an alias for manylinux_2_17_ppc64le
- manylinux2014_s390x is now an alias for manylinux_2_17_s390x

This redefinition is largely a no-op, but does affect a few things:

- Previously, we had an open-ended and growing commitment to keep
  updating every manylinux PEP whenever a new Linux distro was
  released, for the rest of time. By making this PEP normative for the
  older tags, that obligation goes away. When this PEP is accepted,
  the previous manylinux PEPs will receive a final update noting that
  they are no longer maintained and referring to this PEP.

- The "play well with others" rule was always intended, but previous
  PEPs didn't state it explicitly; now it's explicit.

- Previous PEPs assumed that glibc 3.x might be incompatible with
  glibc 2.x, so we checked for compatibility between a system and a
  tag using logic like:

      sys_major == tag_major and sys_minor >= tag_minor

  Recently the glibc maintainers advised us that we should assume that
  glibc will maintain backwards-compatibility indefinitely, even if
  they bump the major version number. So the new check for
  compatibility is:

      (sys_major, sys_minor) >= (tag_major, tag_minor)

Package installers

Generally, package installers should install manylinux wheels on systems
that have an appropriate glibc and architecture, and not otherwise. If
there are multiple compatible manylinux wheels available, then the wheel
with the highest glibc version should be preferred, in order to take
advantage of newer compilers and glibc features.

In addition, we follow previous specifications, and allow for Python
distributors to manually override this check by adding a _manylinux
module to their standard library.
If this package is importable, and if\nit defines a function called manylinux_compatible, then package\ninstallers should call this function, passing in the major version,\nminor version, and architecture from the manylinux tag, and it will\neither return a boolean saying whether wheels with the given tag should\nbe considered compatible with the current system, or else None to\nindicate that the default logic should be used.\n\nFor compatibility with previous specifications, if the tag is manylinux1\nor manylinux_2_5 exactly, then we also check the module for a boolean\nattribute manylinux1_compatible, if the tag version is manylinux2010 or\nmanylinux_2_12 exactly, then we also check the module for a boolean\nattribute manylinux2010_compatible, and if the tag version is\nmanylinux2014 or manylinux_2_17 exactly, then we also check the module\nfor a boolean attribute manylinux2014_compatible. If both the new and\nold attributes are defined, then manylinux_compatible takes precedence.\n\nHere's some example code. You don't have to actually use this code, but\nyou can use it for reference if you have questions about the exact\nsemantics:\n\n LEGACY_ALIASES = {\n \"manylinux1_x86_64\": \"manylinux_2_5_x86_64\",\n \"manylinux1_i686\": \"manylinux_2_5_i686\",\n \"manylinux2010_x86_64\": \"manylinux_2_12_x86_64\",\n \"manylinux2010_i686\": \"manylinux_2_12_i686\",\n \"manylinux2014_x86_64\": \"manylinux_2_17_x86_64\",\n \"manylinux2014_i686\": \"manylinux_2_17_i686\",\n \"manylinux2014_aarch64\": \"manylinux_2_17_aarch64\",\n \"manylinux2014_armv7l\": \"manylinux_2_17_armv7l\",\n \"manylinux2014_ppc64\": \"manylinux_2_17_ppc64\",\n \"manylinux2014_ppc64le\": \"manylinux_2_17_ppc64le\",\n \"manylinux2014_s390x\": \"manylinux_2_17_s390x\",\n }\n\n def manylinux_tag_is_compatible_with_this_system(tag):\n # Normalize and parse the tag\n tag = LEGACY_ALIASES.get(tag, tag)\n m = re.match(\"manylinux_([0-9]+)_([0-9]+)_(.*)\", tag)\n if not m:\n return False\n tag_major_str, tag_minor_str, tag_arch = m.groups()\n tag_major = int(tag_major_str)\n tag_minor = int(tag_minor_str)\n\n if not system_uses_glibc():\n return False\n sys_major, sys_minor = get_system_glibc_version()\n if (sys_major, sys_minor) < (tag_major, tag_minor):\n return False\n sys_arch = get_system_arch()\n if sys_arch != tag_arch:\n return False\n\n # Check for manual override\n try:\n import _manylinux\n except ImportError:\n pass\n else:\n if hasattr(_manylinux, \"manylinux_compatible\"):\n result = _manylinux.manylinux_compatible(\n tag_major, tag_minor, tag_arch,\n )\n if result is not None:\n return bool(result)\n else:\n if (tag_major, tag_minor) == (2, 5):\n if hasattr(_manylinux, \"manylinux1_compatible\"):\n return bool(_manylinux.manylinux1_compatible)\n if (tag_major, tag_minor) == (2, 12):\n if hasattr(_manylinux, \"manylinux2010_compatible\"):\n return bool(_manylinux.manylinux2010_compatible)\n\n return True\n\nPackage indexes\n\nThe exact set of wheel tags accepted by PyPI, or any package index, is a\npolicy question, and up to the maintainers of that index. 
But, we\nrecommend that package indexes accept any wheels whose platform tag\nmatches the following regexes:\n\n- manylinux1_(x86_64|i686)\n- manylinux2010_(x86_64|i686)\n- manylinux2014_(x86_64|i686|aarch64|armv7l|ppc64|ppc64le|s390x)\n- manylinux_[0-9]+_[0-9]+_(.*)\n\nPackage indexes may impose additional requirements; for example, they\nmight audit uploaded wheels and reject those that contain known\nproblems, such as a manylinux_2_17 wheel that references symbols from\nlater glibc versions, or dependencies on external libraries that are\nknown not to exist on all systems. Or a package index might decide to be\nconservative and reject wheels tagged manylinux_2_999, on the grounds\nthat no-one knows what the Linux distro landscape will look like when\nglibc 2.999 is released. We leave the details of any such checks to the\ndiscretion of the package index maintainers.\n\nRejected alternatives\n\nContinuing the manylinux20XX series: As discussed above, this leads to\nmuch more effort-intensive, slower, and more complex rollouts of new\nversions. And while there are two places where it seems at first to have\nsome compensating benefits, if you look more closely this turns out not\nto be the case.\n\nFirst, this forces us to produce human-readable descriptions of how\nLinux distros work, in the text of the PEP. But this is less valuable\nthan it might seem at first, and can actually be handled better by the\nnew \"perennial\" approach anyway.\n\nIf you're trying to build wheels, the main thing you need is a tutorial\non how to use the build images and tooling around them. If you're trying\nto add support for a new build profile or create a competitor to\nauditwheel, then your best resources will be the auditwheel source code\nand issue tracker, which are always going to be more detailed, precise,\nand reliable than a summary spec written in English and without tests.\nDocumentation like the old manylinux20XX PEPs does add value! But in\nboth cases, it's primarily as a secondary reference to provide overview\nand context.\n\nAnd furthermore, the PEP process is poorly suited to maintaining this\nkind of reference documentation – there's a reason we don't keep the pip\nuser manual in the PEPs repository! The auditwheel maintainers are the\nbest situated to understand what kinds of documentation are useful to\ntheir users, and to maintain that documentation over time. For example,\nthere's substantial overlap between the different manylinux versions,\nand the PEP process currently forces us to handle this by copy-pasting\neverything between a growing list of documents; instead, the auditwheel\nmaintainers might choose to factor out the common parts into a single\npiece of shared documentation.\n\nA related concern was that with the perennial approach, it may become\nharder for package maintainers to decide which build profile to target:\ninstead of having to pick between manylinux1, manylinux2010,\nmanylinux2014, ..., they now have a wider array of options like\nmanylinux_2_5, manylinux_2_6, ..., manylinux_2_20, ... But again, we\ndon't believe this will be a problem in practice. In either system, most\npackage maintainers won't be starting by reading PEPs and trying to\nimplement them from scratch. 
If you're a particularly expert and
ambitious package maintainer who needs to target a new version or new
architecture, the perennial approach gives you additional flexibility.
But for regular everyday maintainers, we expect they'll start from a
tutorial like packaging.python.org, and by choosing from existing build
images. A tutorial can just as easily recommend manylinux_2_17 as it can
recommend manylinux2014, and we expect the actual set of pre-provided
build images to be identical in both cases. And again, by maintaining
this documentation in the right place, instead of trying to do it in the
PEPs repository, we expect that we'll end up with documentation that's
higher-quality and more fitted to purpose.

Finally, some participants have pointed out that it's very nice to be
able to look at a wheel and tell definitively whether it meets the
requirements of the spec. With the new "perennial" approach, we can
never say with 100% certainty that a wheel does meet the spec, because
that depends on the Linux distros. As engineers we have a well-justified
dislike for that kind of uncertainty.

However: as demonstrated by the examples above, we can still tell
definitively when a wheel doesn't meet the spec, which turns out to be
what's important in practice. And, in practice, with the manylinux20XX
approach, whenever distros change, we actually change the spec; it takes
a bit longer. So even if a wheel was compliant today, it might become
non-compliant tomorrow. This is frustrating, but unfortunately this
uncertainty is unavoidable if what you care about is distributing
working wheels to users.

So even on these points where the old approach initially seems to have
advantages, we expect the new approach to actually do as well or better.

Switching to perennial tags, but continuing to write a PEP for each
version: This was proposed as a kind of hybrid, to try to get some of
the advantages of the perennial tagging system – like easier rollouts of
new versions – while keeping the advantages of the manylinux20XX scheme,
like forcing us to write documentation about Linux distros, simplifying
options for package maintainers, and being able to definitively tell
when a wheel meets the spec. But as discussed above, on a closer look,
it turns out that these advantages are largely illusory. And this also
inherits significant disadvantages from the manylinux20XX scheme, like
creating indefinite obligations to update a growing list of copy-pasted
PEPs.

Making auditwheel normative: Another possibility that was considered was
to make auditwheel the normative reference on the definition of
manylinux, i.e., a wheel would be compliant if and only if
auditwheel check completed without errors. This was rejected because the
point of packaging PEPs is to define interoperability between tools, not
to bless specific tools.

Adding extra words to the tag string: Another proposal we considered was
to add extra words to the wheel tag, e.g. manylinux_glibc_2_17 instead
of manylinux_2_17. The motivation would be to leave the door open to
other kinds of versioning heuristics in the future – for example, we
could have manylinux_glibc_$VERSION and manylinux_alpine_$VERSION.

But "manylinux" has always been a synonym for "broad compatibility with
mainstream glibc-based distros"; reusing it for unrelated build profiles
like alpine is more confusing than helpful. Also, some early reviewers
who aren't steeped in the details of packaging found the word glibc
actively misleading, jumping to the conclusion that it meant they needed
a system with exactly that glibc version. And tags like
manylinux_$VERSION and alpine_$VERSION also have the advantages of
parsimony and directness. So we'll go with that.

PEP: 544 Title: Protocols: Structural subtyping (static duck typing)
Author: Ivan Levkivskyi, Jukka Lehtosalo, Łukasz Langa
BDFL-Delegate: Guido van Rossum Discussions-To: python-dev@python.org
Status: Final Type: Standards Track Topic: Typing Created: 05-Mar-2017
Python-Version: 3.8 Resolution:
https://mail.python.org/archives/list/typing-sig@python.org/message/FDO4KFYWYQEP3U2HVVBEBR3SXPHQSHYR/

typing:protocols and typing.Protocol

Abstract

Type hints introduced in PEP 484 can be used to specify type metadata
for static type checkers and other third party tools. However, PEP 484
only specifies the semantics of nominal subtyping. In this PEP we
specify static and runtime semantics of protocol classes that will
provide support for structural subtyping (static duck typing).

Rationale and Goals

Currently, PEP 484 and the typing module [typing] define abstract base
classes for several common Python protocols such as Iterable and Sized.
The problem with them is that a class has to be explicitly marked to
support them, which is unpythonic and unlike what one would normally do
in idiomatic dynamically typed Python code. For example, this conforms
to PEP 484:

    from typing import Sized, Iterable, Iterator

    class Bucket(Sized, Iterable[int]):
        ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[int]: ...

The same problem appears with user-defined ABCs: they must be explicitly
subclassed or registered. This is particularly difficult to do with
library types as the type objects may be hidden deep in the
implementation of the library. Also, extensive use of ABCs might impose
additional runtime costs.

The intention of this PEP is to solve all these problems by allowing
users to write the above code without explicit base classes in the class
definition, allowing Bucket to be implicitly considered a subtype of
both Sized and Iterable[int] by static type checkers using structural
[wiki-structural] subtyping:

    from typing import Iterator, Iterable

    class Bucket:
        ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[int]: ...

    def collect(items: Iterable[int]) -> int: ...
    result: int = collect(Bucket())  # Passes type check

Note that ABCs in the typing module already provide structural behavior
at runtime, isinstance(Bucket(), Iterable) returns True. The main goal
of this proposal is to support such behavior statically. The same
functionality will be provided for user-defined protocols, as specified
below. The above code with a protocol class matches common Python
conventions much better.
It is also automatically extensible and works\nwith additional, unrelated classes that happen to implement the required\nprotocol.\n\nNominal vs structural subtyping\n\nStructural subtyping is natural for Python programmers since it matches\nthe runtime semantics of duck typing: an object that has certain\nproperties is treated independently of its actual runtime class.\nHowever, as discussed in PEP 483, both nominal and structural subtyping\nhave their strengths and weaknesses. Therefore, in this PEP we do not\npropose to replace the nominal subtyping described by PEP 484 with\nstructural subtyping completely. Instead, protocol classes as specified\nin this PEP complement normal classes, and users are free to choose\nwhere to apply a particular solution. See section on rejected ideas at\nthe end of this PEP for additional motivation.\n\nNon-goals\n\nAt runtime, protocol classes will be simple ABCs. There is no intent to\nprovide sophisticated runtime instance and class checks against protocol\nclasses. This would be difficult and error-prone and will contradict the\nlogic of PEP 484. As well, following PEP 484 and PEP 526 we state that\nprotocols are completely optional:\n\n- No runtime semantics will be imposed for variables or parameters\n annotated with a protocol class.\n- Any checks will be performed only by third-party type checkers and\n other tools.\n- Programmers are free to not use them even if they use type\n annotations.\n- There is no intent to make protocols non-optional in the future.\n\nTo reiterate, providing complex runtime semantics for protocol classes\nis not a goal of this PEP, the main goal is to provide a support and\nstandards for static structural subtyping. The possibility to use\nprotocols in the runtime context as ABCs is rather a minor bonus that\nexists mostly to provide a seamless transition for projects that already\nuse ABCs.\n\nExisting Approaches to Structural Subtyping\n\nBefore describing the actual specification, we review and comment on\nexisting approaches related to structural subtyping in Python and other\nlanguages:\n\n- zope.interface [zope-interfaces] was one of the first widely used\n approaches to structural subtyping in Python. It is implemented by\n providing special classes to distinguish interface classes from\n normal classes, to mark interface attributes, and to explicitly\n declare implementation. For example:\n\n from zope.interface import Interface, Attribute, implementer\n\n class IEmployee(Interface):\n\n name = Attribute(\"Name of employee\")\n\n def do(work):\n \"\"\"Do some work\"\"\"\n\n @implementer(IEmployee)\n class Employee:\n\n name = 'Anonymous'\n\n def do(self, work):\n return work.start()\n\n Zope interfaces support various contracts and constraints for\n interface classes. For example:\n\n from zope.interface import invariant\n\n def required_contact(obj):\n if not (obj.email or obj.phone):\n raise Exception(\"At least one contact info is required\")\n\n class IPerson(Interface):\n\n name = Attribute(\"Name\")\n email = Attribute(\"Email Address\")\n phone = Attribute(\"Phone Number\")\n\n invariant(required_contact)\n\n Even more detailed invariants are supported. However, Zope\n interfaces rely entirely on runtime validation. 
Such focus on\n runtime properties goes beyond the scope of the current proposal,\n and static support for invariants might be difficult to implement.\n However, the idea of marking an interface class with a special base\n class is reasonable and easy to implement both statically and at\n runtime.\n\n- Python abstract base classes [abstract-classes] are the standard\n library tool to provide some functionality similar to structural\n subtyping. The drawback of this approach is the necessity to either\n subclass the abstract class or register an implementation\n explicitly:\n\n from abc import ABC\n\n class MyTuple(ABC):\n pass\n\n MyTuple.register(tuple)\n\n assert issubclass(tuple, MyTuple)\n assert isinstance((), MyTuple)\n\n As mentioned in the rationale, we want to avoid such necessity,\n especially in static context. However, in a runtime context, ABCs\n are good candidates for protocol classes and they are already used\n extensively in the typing module.\n\n- Abstract classes defined in collections.abc module [collections-abc]\n are slightly more advanced since they implement a custom\n __subclasshook__() method that allows runtime structural checks\n without explicit registration:\n\n from collections.abc import Iterable\n\n class MyIterable:\n def __iter__(self):\n return []\n\n assert isinstance(MyIterable(), Iterable)\n\n Such behavior seems to be a perfect fit for both runtime and static\n behavior of protocols. As discussed in rationale, we propose to add\n static support for such behavior. In addition, to allow users to\n achieve such runtime behavior for user-defined protocols a special\n @runtime_checkable decorator will be provided, see detailed\n discussion below.\n\n- TypeScript [typescript] provides support for user-defined classes\n and interfaces. Explicit implementation declaration is not required\n and structural subtyping is verified statically. For example:\n\n interface LabeledItem {\n label: string;\n size?: int;\n }\n\n function printLabel(obj: LabeledItem) {\n console.log(obj.label);\n }\n\n let myObj = {size: 10, label: \"Size 10 Object\"};\n printLabel(myObj);\n\n Note that optional interface members are supported. Also, TypeScript\n prohibits redundant members in implementations. While the idea of\n optional members looks interesting, it would complicate this\n proposal and it is not clear how useful it will be. Therefore, it is\n proposed to postpone this; see rejected ideas. In general, the idea\n of static protocol checking without runtime implications looks\n reasonable, and basically this proposal follows the same line.\n\n- Go [golang] uses a more radical approach and makes interfaces the\n primary way to provide type information. Also, assignments are used\n to explicitly ensure implementation:\n\n type SomeInterface interface {\n SomeMethod() ([]byte, error)\n }\n\n if _, ok := someval.(SomeInterface); ok {\n fmt.Printf(\"value implements some interface\")\n }\n\n Both these ideas are questionable in the context of this proposal.\n See the section on rejected ideas.\n\nSpecification\n\nTerminology\n\nWe propose to use the term protocols for types supporting structural\nsubtyping. 
The reason is that the term iterator protocol, for example,
is widely understood in the community, and coming up with a new term for
this concept in a statically typed context would just create confusion.

This has the drawback that the term protocol becomes overloaded with two
subtly different meanings: the first is the traditional, well-known but
slightly fuzzy concept of protocols such as iterator; the second is the
more explicitly defined concept of protocols in statically typed code.
The distinction is not important most of the time, and in other cases we
propose to just add a qualifier such as protocol classes when referring
to the static type concept.

If a class includes a protocol in its MRO, the class is called an
explicit subclass of the protocol. If a class is a structural subtype of
a protocol, it is said to implement the protocol and to be compatible
with a protocol. If a class is compatible with a protocol but the
protocol is not included in the MRO, the class is an implicit subtype of
the protocol. (Note that one can explicitly subclass a protocol and
still not implement it if a protocol attribute is set to None in the
subclass, see Python [data-model] for details.)

The attributes (variables and methods) of a protocol that are mandatory
for another class in order to be considered a structural subtype are
called protocol members.

Defining a protocol

Protocols are defined by including a special new class typing.Protocol
(an instance of abc.ABCMeta) in the base classes list, typically at the
end of the list. Here is a simple example:

    from typing import Protocol

    class SupportsClose(Protocol):
        def close(self) -> None:
            ...

Now if one defines a class Resource with a close() method that has a
compatible signature, it would implicitly be a subtype of SupportsClose,
since structural subtyping is used for protocol types:

    class Resource:
        ...
        def close(self) -> None:
            self.file.close()
            self.lock.release()

Apart from a few restrictions explicitly mentioned below, protocol types
can be used in every context where normal types can:

    def close_all(things: Iterable[SupportsClose]) -> None:
        for t in things:
            t.close()

    f = open('foo.txt')
    r = Resource()
    close_all([f, r])  # OK!
    close_all([1])     # Error: 'int' has no 'close' method

Note that both the user-defined class Resource and the built-in IO type
(the return type of open()) are considered subtypes of SupportsClose,
because they provide a close() method with a compatible type signature.

Protocol members

All methods defined in the protocol class body are protocol members,
both normal and decorated with @abstractmethod. If any parameters of a
protocol method are not annotated, then their types are assumed to be
Any (see PEP 484). Bodies of protocol methods are type checked. An
abstract method that should not be called via super() ought to raise
NotImplementedError. Example:

    from typing import Protocol
    from abc import abstractmethod

    class Example(Protocol):
        def first(self) -> int:  # This is a protocol member
            return 42

        @abstractmethod
        def second(self) -> int:  # Method without a default implementation
            raise NotImplementedError

Static methods, class methods, and properties are equally allowed in
protocols.

To define a protocol variable, one can use PEP 526 variable annotations
in the class body. Additional attributes only defined in the body of a
method by assignment via self are not allowed.
The rationale for this is\nthat the protocol class implementation is often not shared by subtypes,\nso the interface should not depend on the default implementation.\nExamples:\n\n from typing import Protocol, List\n\n class Template(Protocol):\n name: str # This is a protocol member\n value: int = 0 # This one too (with default)\n\n def method(self) -> None:\n self.temp: List[int] = [] # Error in type checker\n\n class Concrete:\n def __init__(self, name: str, value: int) -> None:\n self.name = name\n self.value = value\n\n def method(self) -> None:\n return\n\n var: Template = Concrete('value', 42) # OK\n\nTo distinguish between protocol class variables and protocol instance\nvariables, the special ClassVar annotation should be used as specified\nby PEP 526. By default, protocol variables as defined above are\nconsidered readable and writable. To define a read-only protocol\nvariable, one can use an (abstract) property.\n\nExplicitly declaring implementation\n\nTo explicitly declare that a certain class implements a given protocol,\nit can be used as a regular base class. In this case a class could use\ndefault implementations of protocol members. Static analysis tools are\nexpected to automatically detect that a class implements a given\nprotocol. So while it's possible to subclass a protocol explicitly, it's\nnot necessary to do so for the sake of type-checking.\n\nThe default implementations cannot be used if the subtype relationship\nis implicit and only via structural subtyping -- the semantics of\ninheritance is not changed. Examples:\n\n class PColor(Protocol):\n @abstractmethod\n def draw(self) -> str:\n ...\n def complex_method(self) -> int:\n # some complex code here\n ...\n\n class NiceColor(PColor):\n def draw(self) -> str:\n return \"deep blue\"\n\n class BadColor(PColor):\n def draw(self) -> str:\n return super().draw() # Error, no default implementation\n\n class ImplicitColor: # Note no 'PColor' base here\n def draw(self) -> str:\n return \"probably gray\"\n def complex_method(self) -> int:\n # class needs to implement this\n ...\n\n nice: NiceColor\n another: ImplicitColor\n\n def represent(c: PColor) -> None:\n print(c.draw(), c.complex_method())\n\n represent(nice) # OK\n represent(another) # Also OK\n\nNote that there is little difference between explicit and implicit\nsubtypes, the main benefit of explicit subclassing is to get some\nprotocol methods \"for free\". In addition, type checkers can statically\nverify that the class actually implements the protocol correctly:\n\n class RGB(Protocol):\n rgb: Tuple[int, int, int]\n\n @abstractmethod\n def intensity(self) -> int:\n return 0\n\n class Point(RGB):\n def __init__(self, red: int, green: int, blue: str) -> None:\n self.rgb = red, green, blue # Error, 'blue' must be 'int'\n\n # Type checker might warn that 'intensity' is not defined\n\nA class can explicitly inherit from multiple protocols and also from\nnormal classes. In this case methods are resolved using normal MRO and a\ntype checker verifies that all subtyping are correct. The semantics of\n@abstractmethod is not changed, all of them must be implemented by an\nexplicit subclass before it can be instantiated.\n\nMerging and extending protocols\n\nThe general philosophy is that protocols are mostly like regular ABCs,\nbut a static type checker will handle them specially. Subclassing a\nprotocol class would not turn the subclass into a protocol unless it\nalso has typing.Protocol as an explicit base class. 
Without this base,\nthe class is \"downgraded\" to a regular ABC that cannot be used with\nstructural subtyping. The rationale for this rule is that we don't want\nto accidentally have some class act as a protocol just because one of\nits base classes happens to be one. We still slightly prefer nominal\nsubtyping over structural subtyping in the static typing world.\n\nA subprotocol can be defined by having both one or more protocols as\nimmediate base classes and also having typing.Protocol as an immediate\nbase class:\n\n from typing import Sized, Protocol\n\n class SizedAndClosable(Sized, Protocol):\n def close(self) -> None:\n ...\n\nNow the protocol SizedAndClosable is a protocol with two methods,\n__len__ and close. If one omits Protocol in the base class list, this\nwould be a regular (non-protocol) class that must implement Sized.\nAlternatively, one can implement SizedAndClosable protocol by merging\nthe SupportsClose protocol from the example in the definition section\nwith typing.Sized:\n\n from typing import Sized\n\n class SupportsClose(Protocol):\n def close(self) -> None:\n ...\n\n class SizedAndClosable(Sized, SupportsClose, Protocol):\n pass\n\nThe two definitions of SizedAndClosable are equivalent. Subclass\nrelationships between protocols are not meaningful when considering\nsubtyping, since structural compatibility is the criterion, not the MRO.\n\nIf Protocol is included in the base class list, all the other base\nclasses must be protocols. A protocol can't extend a regular class, see\nrejected ideas for rationale. Note that rules around explicit\nsubclassing are different from regular ABCs, where abstractness is\nsimply defined by having at least one abstract method being\nunimplemented. Protocol classes must be marked explicitly.\n\nGeneric protocols\n\nGeneric protocols are important. For example, SupportsAbs, Iterable and\nIterator are generic protocols. They are defined similar to normal\nnon-protocol generic types:\n\n class Iterable(Protocol[T]):\n @abstractmethod\n def __iter__(self) -> Iterator[T]:\n ...\n\nProtocol[T, S, ...] is allowed as a shorthand for\nProtocol, Generic[T, S, ...].\n\nUser-defined generic protocols support explicitly declared variance.\nType checkers will warn if the inferred variance is different from the\ndeclared variance. Examples:\n\n T = TypeVar('T')\n T_co = TypeVar('T_co', covariant=True)\n T_contra = TypeVar('T_contra', contravariant=True)\n\n class Box(Protocol[T_co]):\n def content(self) -> T_co:\n ...\n\n box: Box[float]\n second_box: Box[int]\n box = second_box # This is OK due to the covariance of 'Box'.\n\n class Sender(Protocol[T_contra]):\n def send(self, data: T_contra) -> int:\n ...\n\n sender: Sender[float]\n new_sender: Sender[int]\n new_sender = sender # OK, 'Sender' is contravariant.\n\n class Proto(Protocol[T]):\n attr: T # this class is invariant, since it has a mutable attribute\n\n var: Proto[float]\n another_var: Proto[int]\n var = another_var # Error! 'Proto[float]' is incompatible with 'Proto[int]'.\n\nNote that unlike nominal classes, de facto covariant protocols cannot be\ndeclared as invariant, since this can break transitivity of subtyping\n(see rejected ideas for details). For example:\n\n T = TypeVar('T')\n\n class AnotherBox(Protocol[T]): # Error, this protocol is covariant in T,\n def content(self) -> T: # not invariant.\n ...\n\nRecursive protocols\n\nRecursive protocols are also supported. 
Generic protocols

Generic protocols are important. For example, SupportsAbs, Iterable and Iterator are generic protocols. They are defined similarly to normal non-protocol generic types:

    class Iterable(Protocol[T]):
        @abstractmethod
        def __iter__(self) -> Iterator[T]:
            ...

Protocol[T, S, ...] is allowed as a shorthand for Protocol, Generic[T, S, ...].

User-defined generic protocols support explicitly declared variance. Type checkers will warn if the inferred variance is different from the declared variance. Examples:

    T = TypeVar('T')
    T_co = TypeVar('T_co', covariant=True)
    T_contra = TypeVar('T_contra', contravariant=True)

    class Box(Protocol[T_co]):
        def content(self) -> T_co:
            ...

    box: Box[float]
    second_box: Box[int]
    box = second_box  # This is OK due to the covariance of 'Box'.

    class Sender(Protocol[T_contra]):
        def send(self, data: T_contra) -> int:
            ...

    sender: Sender[float]
    new_sender: Sender[int]
    new_sender = sender  # OK, 'Sender' is contravariant.

    class Proto(Protocol[T]):
        attr: T  # this class is invariant, since it has a mutable attribute

    var: Proto[float]
    another_var: Proto[int]
    var = another_var  # Error! 'Proto[float]' is incompatible with 'Proto[int]'.

Note that unlike nominal classes, de facto covariant protocols cannot be declared as invariant, since this can break transitivity of subtyping (see rejected ideas for details). For example:

    T = TypeVar('T')

    class AnotherBox(Protocol[T]):  # Error, this protocol is covariant in T,
        def content(self) -> T:     # not invariant.
            ...

Recursive protocols

Recursive protocols are also supported. Forward references to the protocol class names can be given as strings as specified by PEP 484. Recursive protocols are useful for representing self-referential data structures like trees in an abstract fashion:

    class Traversable(Protocol):
        def leaves(self) -> Iterable['Traversable']:
            ...

Note that for recursive protocols, a class is considered a subtype of the protocol in situations where the decision depends on itself. Continuing the previous example:

    class SimpleTree:
        def leaves(self) -> List['SimpleTree']:
            ...

    root: Traversable = SimpleTree()  # OK

    class Tree(Generic[T]):
        def leaves(self) -> List['Tree[T]']:
            ...

    def walk(graph: Traversable) -> None:
        ...
    tree: Tree[float] = Tree()
    walk(tree)  # OK, 'Tree[float]' is a subtype of 'Traversable'

Self-types in protocols

The self-types in protocols follow the corresponding specification <484#annotating-instance-and-class-methods> of PEP 484. For example:

    C = TypeVar('C', bound='Copyable')
    class Copyable(Protocol):
        def copy(self: C) -> C:
            ...

    class One:
        def copy(self) -> 'One':
            ...

    T = TypeVar('T', bound='Other')
    class Other:
        def copy(self: T) -> T:
            ...

    c: Copyable
    c = One()    # OK
    c = Other()  # Also OK

Callback protocols

Protocols can be used to define flexible callback types that are hard (or even impossible) to express using the Callable[...] syntax specified by PEP 484, such as variadic, overloaded, and complex generic callbacks. They can be defined as protocols with a __call__ member:

    from typing import Optional, List, Protocol

    class Combiner(Protocol):
        def __call__(self, *vals: bytes,
                     maxlen: Optional[int] = None) -> List[bytes]: ...

    def good_cb(*vals: bytes, maxlen: Optional[int] = None) -> List[bytes]:
        ...
    def bad_cb(*vals: bytes, maxitems: Optional[int]) -> List[bytes]:
        ...

    comb: Combiner = good_cb  # OK
    comb = bad_cb  # Error! Argument 2 has incompatible type because of
                   # different name and kind in the callback

Callback protocols and Callable[...] types can be used interchangeably.

Using Protocols

Subtyping relationships with other types

Protocols cannot be instantiated, so there are no values whose runtime type is a protocol. For variables and parameters with protocol types, subtyping relationships are subject to the following rules:

- A protocol is never a subtype of a concrete type.
- A concrete type X is a subtype of protocol P if and only if X implements all protocol members of P with compatible types. In other words, subtyping with respect to a protocol is always structural.
- A protocol P1 is a subtype of another protocol P2 if P1 defines all protocol members of P2 with compatible types (see the sketch after this list).

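For instance, here is an illustrative sketch of the third rule (not an example from this PEP):

    from typing import Protocol

    class SupportsClose(Protocol):
        def close(self) -> None: ...

    class SupportsCloseAndFlush(Protocol):
        def close(self) -> None: ...
        def flush(self) -> None: ...

    def shutdown(resource: SupportsClose) -> None:
        resource.close()

    def finish(resource: SupportsCloseAndFlush) -> None:
        # OK: SupportsCloseAndFlush defines all protocol members of
        # SupportsClose with compatible types, so it is a subtype of it.
        shutdown(resource)
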
Generic protocol types follow the same rules of variance as non-protocol types. Protocol types can be used in all contexts where any other types can be used, such as in Union, ClassVar, type variable bounds, etc. Generic protocols follow the rules for generic abstract classes, except for using structural compatibility instead of compatibility defined by inheritance relationships.

Static type checkers will recognize protocol implementations, even if the corresponding protocols are not imported:

    # file lib.py
    from typing import Sized

    T = TypeVar('T', contravariant=True)
    class ListLike(Sized, Protocol[T]):
        def append(self, x: T) -> None:
            pass

    def populate(lst: ListLike[int]) -> None:
        ...

    # file main.py
    from lib import populate  # Note that ListLike is NOT imported

    class MockStack:
        def __len__(self) -> int:
            return 42
        def append(self, x: int) -> None:
            print(x)

    populate([1, 2, 3])    # Passes type check
    populate(MockStack())  # Also OK

Unions and intersections of protocols

A union of protocol classes behaves the same way as for non-protocol classes. For example:

    from typing import Union, Optional, Protocol

    class Exitable(Protocol):
        def exit(self) -> int:
            ...
    class Quittable(Protocol):
        def quit(self) -> Optional[int]:
            ...

    def finish(task: Union[Exitable, Quittable]) -> int:
        ...
    class DefaultJob:
        ...
        def quit(self) -> int:
            return 0
    finish(DefaultJob())  # OK

One can use multiple inheritance to define an intersection of protocols. Example:

    from typing import Iterable, Hashable

    class HashableFloats(Iterable[float], Hashable, Protocol):
        pass

    def cached_func(args: HashableFloats) -> float:
        ...
    cached_func((1, 2, 3))  # OK, tuple is both hashable and iterable

If this proves to be a widely used scenario, then a special intersection type construct could be added in future as specified by PEP 483; see rejected ideas for more details.

Type[] and class objects vs protocols

Variables and parameters annotated with Type[Proto] accept only concrete (non-protocol) subtypes of Proto. The main reason for this is to allow instantiation of parameters with such a type. For example:

    class Proto(Protocol):
        @abstractmethod
        def meth(self) -> int:
            ...
    class Concrete:
        def meth(self) -> int:
            return 42

    def fun(cls: Type[Proto]) -> int:
        return cls().meth()  # OK
    fun(Proto)     # Error
    fun(Concrete)  # OK

The same rule applies to variables:

    var: Type[Proto]
    var = Proto     # Error
    var = Concrete  # OK
    var().meth()    # OK

Assigning an ABC or a protocol class to a variable is allowed if it is not explicitly typed, and such an assignment creates a type alias. For normal (non-abstract) classes, the behavior of Type[] is not changed.

A class object is considered an implementation of a protocol if accessing all members on it results in types compatible with the protocol members. For example:

    from typing import Any, Protocol

    class ProtoA(Protocol):
        def meth(self, x: int) -> int: ...
    class ProtoB(Protocol):
        def meth(self, obj: Any, x: int) -> int: ...

    class C:
        def meth(self, x: int) -> int: ...

    a: ProtoA = C  # Type check error, signatures don't match!
    b: ProtoB = C  # OK

NewType() and type aliases

Protocols are essentially anonymous. To emphasize this point, static type checkers might refuse protocol classes inside NewType() to avoid an illusion that a distinct type is provided:

    from typing import NewType, Protocol, Iterator

    class Id(Protocol):
        code: int
        secrets: Iterator[bytes]

    UserId = NewType('UserId', Id)  # Error, can't provide distinct type

In contrast, type aliases are fully supported, including generic type aliases:

    from typing import TypeVar, Reversible, Iterable, Sized

    T = TypeVar('T')
    class SizedIterable(Iterable[T], Sized, Protocol):
        pass
    CompatReversible = Union[Reversible[T], SizedIterable[T]]

Modules as implementations of protocols

A module object is accepted where a protocol is expected if the public interface of the given module is compatible with the expected protocol. For example:

    # file default_config.py
    timeout = 100
    one_flag = True
    other_flag = False

    # file main.py
    import default_config
    from typing import Protocol

    class Options(Protocol):
        timeout: int
        one_flag: bool
        other_flag: bool

    def setup(options: Options) -> None:
        ...

    setup(default_config)  # OK

To determine compatibility of module level functions, the self argument of the corresponding protocol methods is dropped. For example:

    # callbacks.py
    def on_error(x: int) -> None:
        ...
    def on_success() -> None:
        ...

    # main.py
    import callbacks
    from typing import Protocol

    class Reporter(Protocol):
        def on_error(self, x: int) -> None:
            ...
        def on_success(self) -> None:
            ...

    rp: Reporter = callbacks  # Passes type check

@runtime_checkable decorator and narrowing types by isinstance()

The default semantics is that isinstance() and issubclass() fail for protocol types. This is in the spirit of duck typing -- protocols basically would be used to model duck typing statically, not explicitly at runtime.

However, it should be possible for protocol types to implement custom instance and class checks when this makes sense, similar to how Iterable and other ABCs in collections.abc and typing already do it, but this is limited to non-generic and unsubscripted generic protocols (Iterable is statically equivalent to Iterable[Any]). The typing module will define a special @runtime_checkable class decorator that provides the same semantics for class and instance checks as for collections.abc classes, essentially making them "runtime protocols":

    from typing import runtime_checkable, Protocol

    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self):
            ...

    assert isinstance(open('some/file'), SupportsClose)

Note that instance checks are not 100% reliable statically; this is why this behavior is opt-in (see the section on rejected ideas for examples). The most type checkers can do is to treat isinstance(obj, Iterator) roughly as a simpler way to write hasattr(obj, '__iter__') and hasattr(obj, '__next__'). To minimize the risks of this feature, the following rules are applied.

Definitions:

- Data, and non-data protocols: A protocol is called a non-data protocol if it only contains methods as members (for example Sized, Iterator, etc). A protocol that contains at least one non-method member (like x: int) is called a data protocol.
- Unsafe overlap: A type X is called unsafely overlapping with a protocol P, if X is not a subtype of P, but it is a subtype of the type-erased version of P where all members have type Any. In addition, if at least one element of a union unsafely overlaps with a protocol P, then the whole union is unsafely overlapping with P. (A small sketch of an unsafe overlap follows below.)

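For instance, here is an illustrative sketch of an unsafe overlap (not an example from this PEP):

    from typing import Protocol

    class P(Protocol):
        def meth(self, x: int) -> int: ...

    class X:
        def meth(self) -> str: ...  # incompatible signature

    # X is not a subtype of P, but it *is* a subtype of the type-erased
    # version of P (where 'meth' has type Any, X merely needs to have
    # some 'meth' member), so X unsafely overlaps with P.
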
Specification:

- A protocol can be used as a second argument in isinstance() and issubclass() only if it is explicitly opted in by the @runtime_checkable decorator. This requirement exists because protocol checks are not type safe in case of dynamically set attributes, and because type checkers can prove that an isinstance() check is safe only for a given class, not for all its subclasses.
- isinstance() can be used with both data and non-data protocols, while issubclass() can be used only with non-data protocols. This restriction exists because some data attributes can be set on an instance in the constructor, and this information is not always available on the class object.
- Type checkers should reject an isinstance() or issubclass() call if there is an unsafe overlap between the type of the first argument and the protocol.
- Type checkers should be able to select a correct element from a union after a safe isinstance() or issubclass() call. For narrowing from non-union types, type checkers can use their best judgement (this is intentionally unspecified, since a precise specification would require intersection types). A runtime sketch of the second rule follows this list.

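For instance, here is a minimal sketch of the data/non-data distinction at runtime (names are illustrative; the TypeError matches the runtime behavior described above):

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class SupportsClose(Protocol):  # non-data protocol: methods only
        def close(self) -> None: ...

    @runtime_checkable
    class Named(Protocol):          # data protocol: has a non-method member
        name: str

    class File:
        name = "file"
        def close(self) -> None: ...

    assert isinstance(File(), SupportsClose)  # OK: isinstance works for both
    assert isinstance(File(), Named)          # OK: data protocol + isinstance
    assert issubclass(File, SupportsClose)    # OK: non-data protocol
    # issubclass(File, Named)  # TypeError: data protocols don't support it
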
Using Protocols in Python 2.7 - 3.5

Variable annotation syntax was added in Python 3.6, so the syntax for defining protocol variables proposed in the specification section can't be used if support for earlier versions is needed. To define these in a manner compatible with older versions of Python one can use properties. Properties can be settable and/or abstract if needed:

    class Foo(Protocol):
        @property
        def c(self) -> int:
            return 42  # Default value can be provided for property...

        @abstractproperty
        def d(self) -> int:  # ... or it can be abstract
            return 0

Also, function type comments can be used as per PEP 484 (for example to provide compatibility with Python 2). The typing module changes proposed in this PEP will also be backported to earlier versions via the backport currently available on PyPI.

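For instance, here is a hypothetical protocol written in the type-comment style (on older Pythons, Protocol itself would come from the backport mentioned above):

    from typing import Protocol

    class Renderer(Protocol):
        def render(self, obj):
            # type: (object) -> str
            ...
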
Runtime Implementation of Protocol Classes

Implementation details

The runtime implementation could be done in pure Python without any effects on the core interpreter and standard library except in the typing module, and a minor update to collections.abc:

- Define class typing.Protocol similar to typing.Generic.
- Implement functionality to detect whether a class is a protocol or not. Add a class attribute _is_protocol = True if that is the case. Verify that a protocol class only has protocol base classes in the MRO (except for object).
- Implement @runtime_checkable that allows __subclasshook__() to perform structural instance and subclass checks as in collections.abc classes.
- All structural subtyping checks will be performed by static type checkers, such as mypy [mypy]. No additional support for protocol validation will be provided at runtime.

Changes in the typing module

The following classes in the typing module will be protocols:

- Callable
- Awaitable
- Iterable, Iterator
- AsyncIterable, AsyncIterator
- Hashable
- Sized
- Container
- Collection
- Reversible
- ContextManager, AsyncContextManager
- SupportsAbs (and other Supports* classes)

Most of these classes are small and conceptually simple. It is easy to see which methods these protocols implement, and to immediately recognize the corresponding runtime protocol counterpart. Practically, few changes will be needed in typing since some of these classes already behave the necessary way at runtime. Most of these will need to be updated only in the corresponding typeshed stubs [typeshed].

All other concrete generic classes such as List, Set, IO, Deque, etc. are sufficiently complex that it makes sense to keep them non-protocols (i.e. require code to be explicit about them). Also, it is too easy to leave some methods unimplemented by accident, and explicitly marking the subclass relationship allows type checkers to pinpoint the missing implementations.

Introspection

The existing class introspection machinery (dir, __annotations__, etc.) can be used with protocols. In addition, all introspection tools implemented in the typing module will support protocols. Since all attributes need to be defined in the class body based on this proposal, protocol classes will have an even better perspective for introspection than regular classes, where attributes can be defined implicitly -- protocol attributes can't be initialized in ways that are not visible to introspection (using setattr(), assignment via self, etc.). Still, some things like types of attributes will not be visible at runtime in Python 3.5 and earlier, but this looks like a reasonable limitation.

There will be only limited support of isinstance() and issubclass() as discussed above (these will always fail with TypeError for subscripted generic protocols, since a reliable answer could not be given at runtime in this case). But together with other introspection tools this gives a reasonable perspective for runtime type checking tools.

Rejected/Postponed Ideas

The ideas in this section were previously discussed in [several] [discussions] [elsewhere].

Make every class a protocol by default

Some languages such as Go make structural subtyping the only or the primary form of subtyping. We could achieve a similar result by making all classes protocols by default (or even always). However, we believe that it is better to require classes to be explicitly marked as protocols, for the following reasons:

- Protocols don't have some properties of regular classes. In particular, isinstance(), as defined for normal classes, is based on the nominal hierarchy. Making everything a protocol by default while keeping isinstance() working would require changing its semantics, which won't happen.
- Protocol classes should generally not have many method implementations, as they describe an interface, not an implementation. Most classes have many method implementations, making them bad protocol classes.
- Experience suggests that many classes are not practical as protocols anyway, mainly because their interfaces are too large, complex or implementation-oriented (for example, they may include de facto private attributes and methods without a __ prefix).
- Most actually useful protocols in existing Python code seem to be implicit. The ABCs in typing and collections.abc are rather an exception, but even they are recent additions to Python and most programmers do not use them yet.
- Many built-in functions only accept concrete instances of int (and subclass instances), and similarly for other built-in classes. Making int a structural type wouldn't be safe without major changes to the Python runtime, which won't happen.

Protocols subclassing normal classes

The main rationale to prohibit this is to preserve transitivity of subtyping; consider this example:

    from typing import Protocol

    class Base:
        attr: str

    class Proto(Base, Protocol):
        def meth(self) -> int:
            ...

    class C:
        attr: str
        def meth(self) -> int:
            return 0

Now, C is a subtype of Proto, and Proto is a subtype of Base. But C cannot be a subtype of Base (since the latter is not a protocol). This situation would be really weird. In addition, there is an ambiguity about whether attributes of Base should become protocol members of Proto.

Support optional protocol members

We can come up with examples where it would be handy to be able to say that a method or data attribute does not need to be present in a class implementing a protocol, but if it is present, it must conform to a specific signature or type. One could use a hasattr() check to determine whether they can use the attribute on a particular instance.

Languages such as TypeScript have similar features, and apparently they are pretty commonly used. The current realistic potential use cases for protocols in Python don't require these. In the interest of simplicity, we propose to not support optional methods or attributes. We can always revisit this later if there is an actual need.

Allow only protocol methods and force use of getters and setters

One could argue that protocols typically only define methods, but not variables. However, using getters and setters in cases where only a simple variable is needed would be quite unpythonic. Moreover, the widespread use of properties (that often act as type validators) in large code bases is partially due to the previous absence of static type checkers for Python, the problem that PEP 484 and this PEP are aiming to solve. For example:

    # without static types

    class MyClass:
        @property
        def my_attr(self):
            return self._my_attr
        @my_attr.setter
        def my_attr(self, value):
            if not isinstance(value, int):
                raise ValidationError("An integer expected for my_attr")
            self._my_attr = value

    # with static types

    class MyClass:
        my_attr: int

Support non-protocol members

There was an idea to make some methods "non-protocol" (i.e. not necessary to implement, and inherited in explicit subclassing), but it was rejected, since this complicates things. For example, consider this situation:

    class Proto(Protocol):
        @abstractmethod
        def first(self) -> int:
            raise NotImplementedError
        def second(self) -> int:
            return self.first() + 1

    def fun(arg: Proto) -> None:
        arg.second()

The question is: should this be an error? We think most people would expect this to be valid. Therefore, to be on the safe side, we need to require both methods to be implemented in implicit subclasses. In addition, if one looks at definitions in collections.abc, there are very few methods that could be considered "non-protocol". Therefore, it was decided to not introduce "non-protocol" methods.

There is only one downside to this: it will require some boilerplate for implicit subtypes of "large" protocols. But this doesn't apply to "built-in" protocols, which are all "small" (i.e. have only a few abstract methods). Also, such a style is discouraged for user-defined protocols. It is recommended to create compact protocols and combine them.

Make protocols interoperable with other approaches

The protocols as described here are basically a minimal extension to the existing concept of ABCs. We argue that this is the way they should be understood, instead of as something that replaces Zope interfaces, for example. Attempting such interoperability will significantly complicate both the concept and the implementation.

On the other hand, Zope interfaces are conceptually a superset of the protocols defined here, but use an incompatible syntax to define them, because before PEP 526 there was no straightforward way to annotate attributes. In the 3.6+ world, zope.interface might potentially adopt the Protocol syntax. In this case, type checkers could be taught to recognize interfaces as protocols and make simple structural checks with respect to them.

Use assignments to check explicitly that a class implements a protocol

In the Go language the explicit checks for implementation are performed via dummy assignments [golang]. Such checks are also possible with the current proposal. Example:

    class A:
        def __len__(self) -> float:
            return ...

    _: Sized = A()  # Error: A.__len__ doesn't conform to 'Sized'
                    # (Incompatible return type 'float')

This approach moves the check away from the class definition, and it almost requires a comment, as otherwise the code probably would not make any sense to an average reader -- it looks like dead code. Besides, in the simplest form it requires one to construct an instance of A, which could be problematic if this requires accessing or allocating some resources such as files or sockets. We could work around the latter by using a cast, for example, but then the code would be ugly. Therefore, we discourage the use of this pattern.

Support isinstance() checks by default

The problem with this is that instance checks could be unreliable, except for situations where there is a common signature convention such as Iterable. For example:

    class P(Protocol):
        def common_method_name(self, x: int) -> int: ...

    class X:
        def common_method_name(self) -> None: ...  # Note different signature

    def do_stuff(o: Union[P, X]) -> int:
        if isinstance(o, P):
            return o.common_method_name(1)  # Results in TypeError not caught
                                            # statically if o is an X instance.

Another potentially problematic case is assignment of attributes after instantiation:

    class P(Protocol):
        x: int

    class C:
        def initialize(self) -> None:
            self.x = 0

    c = C()
    isinstance(c, P)  # False
    c.initialize()
    isinstance(c, P)  # True

    def f(x: Union[P, int]) -> None:
        if isinstance(x, P):
            # Static type of x is P here.
            ...
        else:
            # Static type of x is int, but can be other type at runtime...
            print(x + 1)

    f(C())  # ...causing a TypeError.

We argue that requiring an explicit class decorator would be better, since one can then attach warnings about problems like this in the documentation. The user would be able to evaluate whether the benefits outweigh the potential for confusion for each protocol and explicitly opt in -- but the default behavior would be safer. Finally, it will be easy to make this behavior the default if necessary, while it might be problematic to make it opt-in after being the default.

Provide a special intersection type construct

There was an idea to allow Proto = All[Proto1, Proto2, ...] as a shorthand for:

    class Proto(Proto1, Proto2, ..., Protocol):
        pass

However, it is not yet clear how popular/useful it will be, and implementing this in type checkers for non-protocol classes could be difficult. Finally, it will be very easy to add this later if needed.

Prohibit explicit subclassing of protocols by non-protocols

This was rejected for the following reasons:

- Backward compatibility: People are already using ABCs, including generic ABCs from the typing module. If we prohibit explicit subclassing of these ABCs, then quite a lot of code will break.
- Convenience: There are existing protocol-like ABCs (that may be turned into protocols) that have many useful "mix-in" (non-abstract) methods. For example, in the case of Sequence one only needs to implement __getitem__ and __len__ in an explicit subclass, and one gets __iter__, __contains__, __reversed__, index, and count for free.
- Explicit subclassing makes it explicit that a class implements a particular protocol, making subtyping relationships easier to see.
- Type checkers can warn about missing protocol members or members with incompatible types more easily, without having to use hacks like the dummy assignments discussed above in this section.
- Explicit subclassing makes it possible to force a class to be considered a subtype of a protocol (by using # type: ignore together with an explicit base class) when it is not strictly compatible, such as when it has an unsafe override.

Covariant subtyping of mutable attributes

Rejected because covariant subtyping of mutable attributes is not safe. Consider this example:

    class P(Protocol):
        x: float

    def f(arg: P) -> None:
        arg.x = 0.42

    class C:
        x: int

    c = C()
    f(c)  # Would typecheck if covariant subtyping
          # of mutable attributes were allowed.
    c.x >> 1  # But this fails at runtime

It was initially proposed to allow this for practical reasons, but it was subsequently rejected, since this may mask some hard-to-spot bugs.

Overriding inferred variance of protocol classes

It was proposed to allow declaring protocols as invariant if they are actually covariant or contravariant (as is possible for nominal classes, see PEP 484). However, it was decided not to do this because of several downsides:

- Declared protocol invariance breaks transitivity of subtyping. Consider this situation:

      T = TypeVar('T')

      class P(Protocol[T]):  # Protocol is declared as invariant.
          def meth(self) -> T:
              ...
      class C:
          def meth(self) -> float:
              ...
      class D(C):
          def meth(self) -> int:
              ...

  Now we have that D is a subtype of C, and C is a subtype of P[float]. But D is not a subtype of P[float], since D implements P[int], and P is invariant. There is a possibility to "cure" this by looking for protocol implementations in MROs, but this would be too complex in the general case, and this "cure" requires abandoning the simple idea of purely structural subtyping for protocols.

- Subtyping checks will always require type inference for protocols. In the above example a user may complain: "Why did you infer P[int] for my D? It implements P[float]!". Normally, inference can be overruled by an explicit annotation, but here this would require explicit subclassing, defeating the purpose of using protocols.

- Allowing overriding of variance would make more detailed error messages in type checkers (citing particular conflicts in member type signatures) impossible.

- Finally, explicit is better than implicit in this case. Requiring the user to declare the correct variance will simplify understanding the code and will avoid unexpected errors at the point of use.

Support adapters and adaptation

Adaptation was proposed by PEP 246 (rejected) and is supported by zope.interface; see the Zope documentation on adapter registries. Adaptation is quite an advanced concept, and PEP 484 supports unions and generic aliases that can be used instead of adapters. This can be illustrated with an example involving the Iterable protocol: there is another way of supporting iteration, by providing __getitem__ and __len__. If a function supports both this style and the now-standard __iter__ method, then its argument could be annotated with a union type:

    class OldIterable(Sized, Protocol[T]):
        def __getitem__(self, item: int) -> T: ...

    CompatIterable = Union[Iterable[T], OldIterable[T]]

    class A:
        def __iter__(self) -> Iterator[str]: ...
    class B:
        def __len__(self) -> int: ...
        def __getitem__(self, item: int) -> str: ...

    def iterate(it: CompatIterable[str]) -> None:
        ...

    iterate(A())  # OK
    iterate(B())  # OK

Since there is a reasonable alternative for such cases with existing tooling, it is proposed not to include adaptation in this PEP.

Call structural base types "interfaces"

"Protocol" is a term already widely used in Python to describe duck typing contracts such as the iterator protocol (providing __iter__ and __next__), and the descriptor protocol (providing __get__, __set__, and __delete__). In addition to this and other reasons given in the specification, protocols are different from Java interfaces in several aspects: protocols don't require explicit declaration of implementation (they are mainly oriented towards duck-typing), and protocols can have default implementations of members and store state.

Make protocols special objects at runtime rather than normal ABCs

Making protocols non-ABCs would make backwards compatibility problematic, if possible at all. For example, collections.abc.Iterable is already an ABC, and lots of existing code uses patterns like isinstance(obj, collections.abc.Iterable) and similar checks with other ABCs (also in a structural manner, i.e., via __subclasshook__). Disabling this behavior would cause breakage. If we keep this behavior for ABCs in collections.abc but do not provide a similar runtime behavior for protocols in typing, then a smooth transition to protocols will not be possible. In addition, having two parallel hierarchies may cause confusion.

Backwards Compatibility

This PEP is fully backwards compatible.

Implementation

The mypy type checker fully supports protocols (modulo a few known bugs). This includes treating all the builtin protocols, such as Iterable, structurally. The runtime implementation of protocols is available in the typing_extensions module on PyPI.

References

abstract-classes
    https://docs.python.org/3/library/abc.html

collections-abc
    https://docs.python.org/3/library/collections.abc.html

data-model
    https://docs.python.org/3/reference/datamodel.html#special-method-names

discussions
    https://github.com/python/typing/issues/11

elsewhere
    https://github.com/python/peps/pull/224

golang
    https://golang.org/doc/effective_go.html#interfaces_and_types

mypy
    http://github.com/python/mypy/

several
    https://mail.python.org/pipermail/python-ideas/2015-September/thread.html#35859

typescript
    https://www.typescriptlang.org/docs/handbook/interfaces.html

typeshed
    https://github.com/python/typeshed/

typing
    https://docs.python.org/3/library/typing.html

wiki-structural
    https://en.wikipedia.org/wiki/Structural_type_system

zope-interfaces
    https://zopeinterface.readthedocs.io/en/latest/

Copyright

This document has been placed in the public domain.

PEP: 354
Title: Enumerations in Python
Version: $Revision$
Last-Modified: $Date$
Author: Ben Finney <ben+python@benfinney.id.au>
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 20-Dec-2005
Python-Version: 2.6
Post-History: 20-Dec-2005
Superseded-By: 435

Rejection Notice

This PEP has been rejected. This doesn't slot nicely into any of the existing modules (like collections), and the Python standard library eschews having lots of individual data structures in their own modules. Also, the PEP has generated no widespread interest. For those who need enumerations, there are cookbook recipes and PyPI packages that meet these needs.

Note: this PEP was superseded by PEP 435, which was accepted in May 2013.

Abstract

This PEP specifies an enumeration data type for Python.

An enumeration is an exclusive set of symbolic names bound to arbitrary unique values. Values within an enumeration can be iterated and compared, but the values have no inherent relationship to values outside the enumeration.

Motivation

The properties of an enumeration are useful for defining an immutable, related set of constant values that have a defined sequence but no inherent semantic meaning. Classic examples are days of the week (Sunday through Saturday) and school assessment grades ('A' through 'D', and 'F').

Other examples include error status values and states within a defined process.

It is possible to simply define a sequence of values of some other basic type, such as int or str, to represent discrete arbitrary values. However, an enumeration ensures that such values are distinct from any others, and that operations without meaning ("Wednesday times two") are not defined for these values.

Specification

An enumerated type is created from a sequence of arguments to the type's constructor:

    >>> Weekdays = enum('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')
    >>> Grades = enum('A', 'B', 'C', 'D', 'F')

Enumerations with no values are meaningless. The exception EnumEmptyError is raised if the constructor is called with no value arguments.

The values are bound to attributes of the new enumeration object:

    >>> today = Weekdays.mon

The values can be compared:

    >>> if today == Weekdays.fri:
    ...     print "Get ready for the weekend"

Values within an enumeration cannot be meaningfully compared except with values from the same enumeration. The comparison operation functions return NotImplemented[1] when a value from an enumeration is compared against any value not from the same enumeration or of a different type:

    >>> gym_night = Weekdays.wed
    >>> gym_night.__cmp__(Weekdays.mon)
    1
    >>> gym_night.__cmp__(Weekdays.wed)
    0
    >>> gym_night.__cmp__(Weekdays.fri)
    -1
    >>> gym_night.__cmp__(23)
    NotImplemented
    >>> gym_night.__cmp__("wed")
    NotImplemented
    >>> gym_night.__cmp__(Grades.B)
    NotImplemented

This allows the operation to succeed, evaluating to a boolean value:

    >>> gym_night = Weekdays.wed
    >>> gym_night < Weekdays.mon
    False
    >>> gym_night < Weekdays.wed
    False
    >>> gym_night < Weekdays.fri
    True
    >>> gym_night < 23
    False
    >>> gym_night > 23
    True
    >>> gym_night > "wed"
    True
    >>> gym_night > Grades.B
    True

Coercing a value from an enumeration to a str results in the string that was specified for that value when constructing the enumeration:

    >>> gym_night = Weekdays.wed
    >>> str(gym_night)
    'wed'

The sequence index of each value from an enumeration is exported as an integer via that value's index attribute:

    >>> gym_night = Weekdays.wed
    >>> gym_night.index
    3

An enumeration can be iterated, returning its values in the sequence they were specified when the enumeration was created:

    >>> print [str(day) for day in Weekdays]
    ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']

Values from an enumeration are hashable, and can be used as dict keys:

    >>> plans = {}
    >>> plans[Weekdays.sat] = "Feed the horse"

The normal usage of enumerations is to provide a set of possible values for a data type, which can then be used to map to other information about the values:

    >>> for report_grade in Grades:
    ...     report_students[report_grade] = \
    ...         [s for s in students if s.grade == report_grade]

Rationale -- Other designs considered

All in one class

Some implementations have the enumeration and its values all as attributes of a single object or class.

This PEP specifies a design where the enumeration is a container, and the values are simple comparables.

It was felt that attempting to place all the properties of enumeration within a single class complicates the design without apparent benefit.

Metaclass for creating enumeration classes

The enumerations specified in this PEP are instances of an enum type. Some alternative designs implement each enumeration as its own class, and use a metaclass to define common properties of all enumerations.

One motivation for having a class (rather than an instance) for each enumeration is to allow subclasses of enumerations, extending and altering an existing enumeration. A class, though, implies that instances of that class will be created; it is difficult to imagine what it means to have separate instances of a "days of the week" class, where each instance contains all days. This usually leads to having each class follow the Singleton pattern, further complicating the design.

In contrast, this PEP specifies enumerations that are not expected to be extended or modified. It is, of course, possible to create a new enumeration from the string values of an existing one, or even to subclass the enum type if desired.

Values related to other types

Some designs express a strong relationship to some other value, such as a particular integer or string, for each enumerated value.

This results in using such values in contexts where the enumeration has no meaning, and unnecessarily complicates the design. The enumerated values specified in this PEP export the values used to create them, and can be compared for equality with any other value, but sequence comparison with values outside the enumeration is explicitly not implemented.

Hiding attributes of enumerated values

A previous design had the enumerated values hiding as much as possible about their implementation, to the point of not exporting the string key and sequence index.

The design in this PEP acknowledges that programs will often find it convenient to know the enumerated value's enumeration type, sequence index, and string key specified for the value.

These are exported by the enumerated value as attributes.

Implementation

This design is based partly on a recipe[2] from the Python Cookbook.

The PyPI package enum[3] provides a Python implementation of the data types described in this PEP.

References and Footnotes

[1] The NotImplemented return value from comparison operations signals the Python interpreter to attempt alternative comparisons or other fallbacks.

[2] "First Class Enums in Python", Zoran Isailovski, Python Cookbook recipe 413486

[3] Python Package Index, package enum

Copyright

This document has been placed in the public domain.

PEP: 392
Title: Python 3.2 Release Schedule
Version: $Revision$
Last-Modified: $Date$
Author: Georg Brandl <georg@python.org>
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 30-Dec-2009
Python-Version: 3.2

Abstract

This document describes the development and release schedule for the Python 3.2 series. The schedule primarily concerns itself with PEP-sized items.

Release Manager and Crew

- 3.2 Release Manager: Georg Brandl
- Windows installers: Martin v. Loewis
- Mac installers: Ronald Oussoren
- Documentation: Georg Brandl

3.2 Lifespan

3.2 will receive bugfix updates approximately every 4-6 months for approximately 18 months. After the release of 3.3.0 final (see PEP 398), a final 3.2 bugfix update will be released. After that, security updates (source only) will be released until 5 years after the release of 3.2 final, which was planned for February 2016.

As of 2016-02-20, Python 3.2.x reached end-of-life status.

The final source release was 3.2.6 in October 2014.

Release Schedule

3.2 schedule

- 3.2 alpha 1: August 1, 2010
- 3.2 alpha 2: September 6, 2010
- 3.2 alpha 3: October 12, 2010
- 3.2 alpha 4: November 16, 2010
- 3.2 beta 1: December 6, 2010

(No new features beyond this point.)

- 3.2 beta 2: December 20, 2010
- 3.2 candidate 1: January 16, 2011
- 3.2 candidate 2: January 31, 2011
- 3.2 candidate 3: February 14, 2011
- 3.2 final: February 20, 2011

3.2.1 schedule

- 3.2.1 beta 1: May 8, 2011
- 3.2.1 candidate 1: May 17, 2011
- 3.2.1 candidate 2: July 3, 2011
- 3.2.1 final: July 11, 2011

3.2.2 schedule

- 3.2.2 candidate 1: August 14, 2011
- 3.2.2 final: September 4, 2011

3.2.3 schedule

- 3.2.3 candidate 1: February 25, 2012
- 3.2.3 candidate 2: March 18, 2012
- 3.2.3 final: April 11, 2012

3.2.4 schedule

- 3.2.4 candidate 1: March 23, 2013
- 3.2.4 final: April 6, 2013

3.2.5 schedule (regression fix release)

- 3.2.5 final: May 13, 2013

-- Only security releases after 3.2.5 --

3.2.6 schedule

- 3.2.6 candidate 1 (source-only release): October 4, 2014
- 3.2.6 final (source-only release): October 11, 2014

Features for 3.2

Note that PEP 3003 is in effect: no changes to language syntax and no additions to the builtins may be made.

No large-scale changes have been recorded yet.

Copyright

This document has been placed in the public domain.

PEP: 711
Title: PyBI: a standard format for distributing Python Binaries
Author: Nathaniel J. Smith <njs@pobox.com>
PEP-Delegate: TODO
Discussions-To: https://discuss.python.org/t/pep-711-pybi-a-standard-format-for-distributing-python-binaries/25547
Status: Draft
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 06-Apr-2023
Post-History: 06-Apr-2023

Abstract

“Like wheels, but instead of a pre-built python package, it’s a pre-built python interpreter”

Motivation

End goal: Pypi.org has pre-built packages for all Python versions on all popular platforms, so automated tools can easily grab any of them and set it up. It becomes quick and easy to try Python prereleases, pin Python versions in CI, make a temporary environment to reproduce a bug report that only happens on a specific Python point release, etc.

First step (this PEP): define a standard packaging file format to hold pre-built Python interpreters, that reuses existing Python packaging standards as much as possible.

Examples

Example pybi builds are available at pybi.vorpus.org.

They’re zip files, so you can unpack them and poke around inside if you want to get a feel for how they’re laid out.

You can also look at the tooling I used to create them.

Specification

Filename

Filename: {distribution}-{version}[-{build tag}]-{platform tag}.pybi

This matches the wheel file format defined in PEP 427, except dropping the {python tag} and {abi tag} and changing the extension from .whl → .pybi.

For example:

- cpython-3.9.3-manylinux_2014.pybi
- cpython-3.10b2-win_amd64.pybi

Just like for wheels, if a pybi supports multiple platforms, you can separate them by dots to make a “compressed tag set”:

- cpython-3.9.5-macosx_11_0_x86_64.macosx_11_0_arm64.pybi

(Though in practice this probably won’t be used much, e.g. the above filename is more idiomatically written as cpython-3.9.5-macosx_11_0_universal2.pybi.)

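For instance, a tool could split such a filename with a sketch like the following (the regex is illustrative, not normative):

    import re

    PYBI_NAME_RE = re.compile(
        r"""^(?P<distribution>[^-]+)
             -(?P<version>[^-]+)
             (?:-(?P<build>\d[^-]*))?
             -(?P<platform>[^-]+)
             \.pybi$""",
        re.VERBOSE,
    )

    m = PYBI_NAME_RE.match("cpython-3.9.3-manylinux_2014.pybi")
    assert m and m.group("distribution") == "cpython"
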
File contents

A .pybi file is a zip file that can be unpacked directly into an arbitrary location and then used as a self-contained Python environment. There’s no .data directory or install scheme keys, because the Python environment knows which install scheme it’s using, so it can just put things in the right places to start with.

The “arbitrary location” part is important: the pybi can’t contain any hardcoded absolute paths. In particular, any preinstalled scripts MUST NOT embed absolute paths in their shebang lines.

Similar to wheels’ <name>-<version>.dist-info directory, the pybi archive must contain a top-level directory named pybi-info/. (Rationale: calling it pybi-info instead of dist-info makes sure that tools don’t get confused about which kind of metadata they’re looking at; leaving off the {name}-{version} part is fine because only one pybi can be installed into a given directory.) The pybi-info/ directory contains at least the following files:

- .../PYBI: metadata about the archive itself, in the same RFC822-ish format as METADATA and WHEEL files (see the parsing sketch after this list):

      Pybi-Version: 1.0
      Generator: {name} {version}
      Tag: {platform tag}
      Tag: {another platform tag}
      Tag: {...and so on...}
      Build: 1 # optional

- .../RECORD: same as in wheels, except see the note about symlinks, below.

- .../METADATA: In the same format as described in the current core metadata spec, except that the following keys are forbidden because they don’t make sense:

  - Requires-Dist
  - Provides-Extra
  - Requires-Python

  And also there are some new, required keys described below.

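Since PYBI uses the same RFC822-ish format as METADATA and WHEEL, the stdlib email parser is one reasonable way to read it. A minimal sketch, assuming the pybi has already been unpacked into the current directory:

    from email.parser import Parser

    with open("pybi-info/PYBI") as f:
        pybi_metadata = Parser().parsestr(f.read())

    print(pybi_metadata["Pybi-Version"])  # e.g. "1.0"
    print(pybi_metadata.get_all("Tag"))   # list of platform tags
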
Pybi-specific core metadata

Here’s an example of the new METADATA fields, before we give the full details:

    Pybi-Environment-Marker-Variables: {"implementation_name": "cpython", "implementation_version": "3.10.8", "os_name": "posix", "platform_machine": "x86_64", "platform_system": "Linux", "python_full_version": "3.10.8", "platform_python_implementation": "CPython", "python_version": "3.10", "sys_platform": "linux"}
    Pybi-Paths: {"stdlib": "lib/python3.10", "platstdlib": "lib/python3.10", "purelib": "lib/python3.10/site-packages", "platlib": "lib/python3.10/site-packages", "include": "include/python3.10", "platinclude": "include/python3.10", "scripts": "bin", "data": "."}
    Pybi-Wheel-Tag: cp310-cp310-PLATFORM
    Pybi-Wheel-Tag: cp310-abi3-PLATFORM
    Pybi-Wheel-Tag: cp310-none-PLATFORM
    Pybi-Wheel-Tag: cp39-abi3-PLATFORM
    Pybi-Wheel-Tag: cp38-abi3-PLATFORM
    Pybi-Wheel-Tag: cp37-abi3-PLATFORM
    Pybi-Wheel-Tag: cp36-abi3-PLATFORM
    Pybi-Wheel-Tag: cp35-abi3-PLATFORM
    Pybi-Wheel-Tag: cp34-abi3-PLATFORM
    Pybi-Wheel-Tag: cp33-abi3-PLATFORM
    Pybi-Wheel-Tag: cp32-abi3-PLATFORM
    Pybi-Wheel-Tag: py310-none-PLATFORM
    Pybi-Wheel-Tag: py3-none-PLATFORM
    Pybi-Wheel-Tag: py39-none-PLATFORM
    Pybi-Wheel-Tag: py38-none-PLATFORM
    Pybi-Wheel-Tag: py37-none-PLATFORM
    Pybi-Wheel-Tag: py36-none-PLATFORM
    Pybi-Wheel-Tag: py35-none-PLATFORM
    Pybi-Wheel-Tag: py34-none-PLATFORM
    Pybi-Wheel-Tag: py33-none-PLATFORM
    Pybi-Wheel-Tag: py32-none-PLATFORM
    Pybi-Wheel-Tag: py31-none-PLATFORM
    Pybi-Wheel-Tag: py30-none-PLATFORM
    Pybi-Wheel-Tag: py310-none-any
    Pybi-Wheel-Tag: py3-none-any
    Pybi-Wheel-Tag: py39-none-any
    Pybi-Wheel-Tag: py38-none-any
    Pybi-Wheel-Tag: py37-none-any
    Pybi-Wheel-Tag: py36-none-any
    Pybi-Wheel-Tag: py35-none-any
    Pybi-Wheel-Tag: py34-none-any
    Pybi-Wheel-Tag: py33-none-any
    Pybi-Wheel-Tag: py32-none-any
    Pybi-Wheel-Tag: py31-none-any
    Pybi-Wheel-Tag: py30-none-any

Specification:

- Pybi-Environment-Marker-Variables: The value of all PEP 508 environment marker variables that are static across installs of this Pybi, as a JSON dict. So for example:

  - python_version will always be present, because a Python 3.10 package always has python_version == "3.10".

  - platform_version will generally not be present, because it gives detailed information about the OS where Python is running, for example:

        #60-Ubuntu SMP Thu May 6 07:46:32 UTC 2021

    platform_release has similar issues.

  - platform_machine will usually be present, except for macOS universal2 pybis: these can potentially be run in either x86-64 or arm64 mode, and we don't know which until the interpreter is actually invoked, so we can't record it in static metadata.

  Rationale: In many cases, this should allow a resolver running on Linux to compute package pins for a Python environment on Windows, or vice-versa, so long as the resolver has access to the target platform’s .pybi file. (Note that Requires-Python constraints can be checked by using the python_full_version value.) While we have to leave out a few keys sometimes, they're either fairly useless (platform_version, platform_release) or can be reconstructed by the resolver (platform_machine).

  The markers are also just generally useful information to have accessible. For example, if you have a pypy3-7.3.2 pybi, and you want to know what version of the Python language that supports, then that’s recorded in the python_version marker.

  (Note: we may want to deprecate/remove platform_version and platform_release? They're problematic and I can't figure out any cases where they're useful. But that's out of scope of this particular PEP.)

- Pybi-Paths: The install paths needed to install wheels (same keys as sysconfig.get_paths()), as relative paths starting at the root of the zip file, as a JSON dict.

  These paths MUST be written in Unix format, using forward slashes as a separator, not backslashes.

  It must be possible to invoke the Python interpreter by running {paths["scripts"]}/python. If there are alternative interpreter entry points (e.g. pythonw for Windows GUI apps), then they should also be in that directory under their conventional names, with no version number attached. (You can also have a python3.11 symlink if you want; there’s no rule against that. It’s just that python has to exist and work.)

  Rationale: Pybi-Paths and Pybi-Wheel-Tags (see below) are together enough to let an installer choose wheels and install them into an unpacked pybi environment, without invoking Python. Besides, we need to write down the interpreter location somewhere, so it’s two birds with one stone.

- Pybi-Wheel-Tag: The wheel tags supported by this interpreter, in preference order (most-preferred first, least-preferred last), except that the special platform tag PLATFORM should replace any platform tags that depend on the final installation system.

  Discussion: It would be nice™ if installers could compute a pybi’s corresponding wheel tags ahead of time, so that they could install wheels into the unpacked pybi without needing to actually invoke the python interpreter to query its tags -- both for efficiency and to allow for more exotic use cases like setting up a Windows environment from a Linux host.

  But unfortunately, it’s impossible to compute the full set of platform tags supported by a Python installation ahead of time, because they can depend on the final system:

  - A pybi tagged manylinux_2_12_x86_64 can always use wheels tagged as manylinux_2_12_x86_64. It also might be able to use wheels tagged manylinux_2_17_x86_64, but only if the final installation system has glibc 2.17+.
  - A pybi tagged macosx_11_0_universal2 (= x86-64 + arm64 support in the same binary) might be able to use wheels tagged as macosx_11_0_arm64, but only if it’s installed on an “Apple Silicon” machine and running in arm64 mode.

  In these two cases, an installation tool can still work out the appropriate set of wheel tags by computing the local platform tags, taking the wheel tag templates from Pybi-Wheel-Tag, and swapping in the actual supported platforms in place of the magic PLATFORM string (a sketch of this follows below).

  However, there are other cases that are even more complicated:

  - You can (usually) run both 32- and 64-bit apps on 64-bit Windows. So a pybi installer might compute the set of allowable pybi tags on the current platform as [win32, win_amd64]. But you can’t then just take that set and swap it into the pybi’s wheel tag template or you get nonsense:

        [
            "cp39-cp39-win32",
            "cp39-cp39-win_amd64",
            "cp39-abi3-win32",
            "cp39-abi3-win_amd64",
            ...
        ]

    To handle this, the installer needs to somehow understand that a manylinux_2_12_x86_64 pybi can use a manylinux_2_17_x86_64 wheel as long as those are both valid tags on the current machine, but a win32 pybi can’t use a win_amd64 wheel, even if those are both valid tags on the current machine.

  - A pybi tagged macosx_11_0_universal2 might be able to use wheels tagged as macosx_11_0_x86_64, but only if it’s installed on an x86-64 machine or it’s installed on an ARM machine and the interpreter is invoked with the magic incantation that tells macOS to run a binary in x86-64 mode. So how the installer plans to invoke the pybi matters too!

  So actually using Pybi-Wheel-Tag values is less trivial than it might seem, and they’re probably only useful with fairly sophisticated tooling. But, smart pybi installers will already have to understand a lot of these platform compatibility issues in order to select a working pybi, and for the cross-platform pinning/environment building case, users can potentially provide whatever information is needed to disambiguate exactly what platform they’re targeting. So, it’s still useful enough to include in the PyBI metadata -- tools that don't find it useful can simply ignore it.

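Here is a sketch of how a consumer might use these fields. The metadata object and the naive PLATFORM expansion are both assumptions (and, as discussed above, the naive expansion is wrong for cases like win32 vs win_amd64):

    import json
    import packaging.specifiers
    import packaging.tags

    def interpreter_info(metadata):
        # 'metadata' is assumed to be a parsed METADATA file, e.g. an
        # email.message.Message from the PYBI-parsing sketch earlier.
        marker_vars = json.loads(metadata["Pybi-Environment-Marker-Variables"])

        # A package's Requires-Python constraint can be checked against
        # the static python_full_version marker:
        supports_39 = packaging.specifiers.SpecifierSet(">=3.9").contains(
            marker_vars["python_full_version"]
        )

        # Naive PLATFORM expansion, using the platform tags valid on the
        # *current* machine:
        platforms = []
        for tag in packaging.tags.sys_tags():
            if tag.platform not in platforms:
                platforms.append(tag.platform)
        wheel_tags = []
        for template in metadata.get_all("Pybi-Wheel-Tag"):
            if "PLATFORM" in template:
                wheel_tags.extend(
                    template.replace("PLATFORM", p) for p in platforms
                )
            else:
                wheel_tags.append(template)
        return marker_vars, supports_39, wheel_tags
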
You can probably generate these metadata values by running this script on the built interpreter:

    import packaging.markers
    import packaging.tags
    import sysconfig
    import os.path
    import json
    import sys

    marker_vars = packaging.markers.default_environment()
    # Delete any keys that depend on the final installation
    del marker_vars["platform_release"]
    del marker_vars["platform_version"]
    # Darwin binaries are often multi-arch, so play it safe and
    # delete the architecture marker. (Better would be to only
    # do this if the pybi actually is multi-arch.)
    if marker_vars["sys_platform"] == "darwin":
        del marker_vars["platform_machine"]

    # Copied and tweaked version of packaging.tags.sys_tags
    tags = []
    interp_name = packaging.tags.interpreter_name()
    if interp_name == "cp":
        tags += list(packaging.tags.cpython_tags(platforms=["xyzzy"]))
    else:
        tags += list(packaging.tags.generic_tags(platforms=["xyzzy"]))

    tags += list(packaging.tags.compatible_tags(platforms=["xyzzy"]))

    # Gross hack: packaging.tags normalizes platforms by lowercasing them,
    # so we generate the tags with a unique string and then replace it
    # with our special uppercase placeholder.
    str_tags = [str(t).replace("xyzzy", "PLATFORM") for t in tags]

    (base_path,) = sysconfig.get_config_vars("installed_base")
    # For some reason, macOS framework builds report their
    # installed_base as a directory deep inside the framework.
    while "Python.framework" in base_path:
        base_path = os.path.dirname(base_path)
    paths = {
        key: os.path.relpath(path, base_path).replace("\\", "/")
        for (key, path) in sysconfig.get_paths().items()
    }

    json.dump(
        {"marker_vars": marker_vars, "tags": str_tags, "paths": paths},
        sys.stdout,
    )

This emits a JSON dict on stdout with separate entries for each set of pybi-specific tags.

Symlinks

Currently, symlinks are used by default in all Unix Python installs (e.g., bin/python3 -> bin/python3.9). And furthermore, symlinks are required to store macOS framework builds in .pybi files. So, unlike wheel files, we absolutely have to support symlinks in .pybi files for them to be useful at all.

Representing symlinks in zip files

The de-facto standard for representing symlinks in zip files is the Info-Zip symlink extension, which works as follows:

- The symlink’s target path is stored as if it were the file contents
- The top 4 bits of the Unix permissions field are set to 0xa, i.e.: permissions & 0xf000 == 0xa000
- The Unix permissions field, in turn, is stored as the top 16 bits of the “external attributes” field.

So if using Python’s zipfile module, you can check whether a ZipInfo represents a symlink by doing:

    (zip_info.external_attr >> 16) & 0xf000 == 0xa000

Or if using Rust’s zip crate, the equivalent check is:

    fn is_symlink(zip_file: &zip::ZipFile) -> bool {
        match zip_file.unix_mode() {
            Some(mode) => mode & 0xf000 == 0xa000,
            None => false,
        }
    }

If you’re on Unix, your zip and unzip commands probably understand this format already.

Representing symlinks in RECORD files

Normally, a RECORD file lists each file + its hash + its length:

    my/favorite/file,sha256=...,12345

For symlinks, we instead write:

    name/of/symlink,symlink=path/to/symlink/target,

That is: we use a special “hash function” called symlink, and then store the actual symlink target as the “hash value”. And the length is left empty.

Rationale: we’re already committed to the RECORD file containing a redundant check on everything in the main archive, so for symlinks we at least need to store some kind of hash, plus some kind of flag to indicate that this is a symlink. Given that symlink target strings are roughly the same size as a hash, we might as well store them directly. This also makes the symlink information easier to access for tools that don’t understand the Info-Zip symlink extension, and makes it possible to losslessly unpack and repack a Unix pybi on a Windows system, which someone might find handy at some point.

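Since RECORD rows are plain CSV (as in wheels), a reader for this convention might look like the following sketch (illustrative, not normative):

    import csv

    def iter_record(path):
        # Yield ('symlink', name, target) or ('file', name, hash) tuples.
        with open(path, newline="") as f:
            for name, hash_, length in csv.reader(f):
                if hash_.startswith("symlink="):
                    yield "symlink", name, hash_[len("symlink="):]
                else:
                    yield "file", name, hash_
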
Storing symlinks in pybi files

When a pybi creator stores a symlink, they MUST use both of the mechanisms defined above: storing it in the zip archive directly using the Info-Zip representation, and also recording it in the RECORD file.

Pybi consumers SHOULD validate that the symlinks in the archive and RECORD file are consistent with each other.

We also considered using only the RECORD file to store symlinks, but then the vanilla unzip tool wouldn’t be able to unpack them, and that would make it hard to install a pybi from a shell script.

Limitations

Symlinks enable a lot of potential messiness. To keep things under control, we impose the following restrictions:

- Symlinks MUST NOT be used in .pybis targeting Windows, or other platforms that are missing first-class symlink support.

- Symlinks MUST NOT be used inside the pybi-info directory. (Rationale: there’s no need, and it makes things simpler for resolvers that need to extract info from pybi-info without unpacking the whole archive.)

- Symlink targets MUST be relative paths, and MUST be inside the pybi directory.

- If A/B/... is recorded as a symlink in the archive, then there MUST NOT be any other entries in the archive named like A/B/.../C.

  For example, if an archive has a symlink foo -> bar, and then later in the archive there’s a regular file named foo/blah.py, then a naive unpacker could potentially end up writing a file called bar/blah.py. Don’t be naive.

Unpackers MUST verify that these rules are followed, because without them attackers could create evil symlinks like foo -> /etc/passwd or foo -> ../../../../../etc + foo/passwd -> ... and cause havoc.

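For instance, an unpacker might enforce the relative-path rules with a check along these lines (a sketch, not exhaustive -- e.g. it doesn't account for chains of symlinks):

    import posixpath

    def check_symlink_target(member: str, target: str) -> None:
        # Reject absolute targets and Windows-style separators outright.
        if posixpath.isabs(target) or "\\" in target:
            raise ValueError(f"bad symlink target: {target!r}")
        # Resolve the target relative to the symlink's own directory and
        # make sure it stays inside the unpacked pybi directory.
        resolved = posixpath.normpath(
            posixpath.join(posixpath.dirname(member), target)
        )
        if resolved == ".." or resolved.startswith("../"):
            raise ValueError(
                f"symlink escapes the pybi: {member!r} -> {target!r}"
            )
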

Storing symlinks in pybi files

When a pybi creator stores a symlink, they MUST use both of the
mechanisms defined above: storing it in the zip archive directly using
the Info-Zip representation, and also recording it in the RECORD file.

Pybi consumers SHOULD validate that the symlinks in the archive and
RECORD file are consistent with each other.

We also considered using only the RECORD file to store symlinks, but
then the vanilla unzip tool wouldn't be able to unpack them, and that
would make it hard to install a pybi from a shell script.

Limitations

Symlinks enable a lot of potential messiness. To keep things under
control, we impose the following restrictions:

-   Symlinks MUST NOT be used in .pybis targeting Windows, or other
    platforms that are missing first-class symlink support.

-   Symlinks MUST NOT be used inside the pybi-info directory.
    (Rationale: there's no need, and it makes things simpler for
    resolvers that need to extract info from pybi-info without
    unpacking the whole archive.)

-   Symlink targets MUST be relative paths, and MUST be inside the
    pybi directory.

-   If A/B/... is recorded as a symlink in the archive, then there
    MUST NOT be any other entries in the archive named like A/B/.../C.

    For example, if an archive has a symlink foo -> bar, and then
    later in the archive there's a regular file named foo/blah.py,
    then a naive unpacker could potentially end up writing a file
    called bar/blah.py. Don't be naive.

Unpackers MUST verify that these rules are followed, because without
them attackers could create evil symlinks like foo -> /etc/passwd or
foo -> ../../../../../etc + foo/passwd -> ... and cause havoc.
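
As a non-normative sketch, a target-containment check along these
lines would reject both of the evil examples above (it assumes
POSIX-style archive paths; the duplicate-entry rule from the list
needs a separate check):

    import posixpath

    def symlink_target_is_safe(link_name, target):
        # Absolute targets are forbidden outright.
        if posixpath.isabs(target):
            return False
        # Resolve the target relative to the symlink's own directory...
        resolved = posixpath.normpath(
            posixpath.join(posixpath.dirname(link_name), target))
        # ...and reject anything that escapes the pybi directory.
        return resolved != ".." and not resolved.startswith("../")

    assert not symlink_target_is_safe("foo", "/etc/passwd")
    assert not symlink_target_is_safe("foo", "../../../../../etc")
    assert symlink_target_is_safe("bin/python3", "python3.9")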

Non-normative comments

Why not just use conda?

This isn't really in the scope of this PEP, but since conda is a
popular way to distribute binary Python interpreters, it's a natural
question.

The simple answer is: conda is great! But, there are lots of Python
users who aren't conda users, and they deserve nice things too. This
PEP just gives them another option.

The deeper answer is: the maintainers who upload packages to PyPI are
the backbone of the Python ecosystem. They're the first audience for
Python packaging tools. And one thing they want is to upload a package
once, and have it be accessible across all the different ways Python
is deployed: in Debian and Fedora and Homebrew and FreeBSD, in Conda
environments, in big companies' monorepos, in Nix, in Blender plugins,
in RenPy games, ..... you get the idea.

All of these environments have their own tooling and strategies for
managing packages and dependencies. So what's special about PyPI and
wheels is that they're designed to describe dependencies in a
standard, abstract way, that all these downstream systems can consume
and convert into their local conventions. That's why package
maintainers use Python-specific metadata and upload to PyPI: because
it lets them address all of those systems simultaneously. Every time
you build a Python package for conda, there's an intermediate wheel
that's generated, because wheels are the common language that Python
package build systems and conda can use to talk to each other.

But then, if you're a maintainer releasing an sdist+wheels, then you
naturally want to test what you're releasing, which may depend on
arbitrary PyPI packages and versions. So you need tools that build
Python environments directly from PyPI, and conda is fundamentally not
designed to do that. So conda and pip are both necessary for different
cases, and this proposal happens to be targeting the pip side of that
equation.

Sdists (or not)

It might be cool to have an "sdist" equivalent for pybis, i.e., some
kind of format for a Python source release that's structured-enough to
let tools automatically fetch and build it into a pybi, for platforms
where prebuilt pybis aren't available. But, this isn't necessary for
the MVP and opens a can of worms, so let's worry about it later.

What packages should be bundled inside a pybi?

Pybi builders have the power to pick and choose what exactly goes
inside. For example, you could include some preinstalled packages in
the pybi's site-packages directory, or prune out bits of the stdlib
that you don't want. We can't stop you! Though if you do preinstall
packages, then it's strongly recommended to also include the correct
metadata (.dist-info etc.), so that it's possible for pip or other
tools to figure out what's going on.

For my prototype "general purpose" pybis, what I chose is:

-   Make sure site-packages is empty.

    Rationale: for traditional standalone python installers that are
    targeted at end-users, you probably want to include at least pip,
    to avoid bootstrapping issues (PEP 453). But pybis are different:
    they're designed to be installed by "smart" tooling, which
    consumes the pybi as part of some kind of larger automated
    deployment process. It's easier for these installers to start from
    a blank slate and then add whatever they need, than for them to
    start with some preinstalled packages that they may or may not
    want. (And besides, you can still run python -m ensurepip.)

-   Include the full stdlib, except for test.

    Rationale: the top-level test module contains CPython's own test
    suite. It's huge (CPython without test is ~37 MB, then test adds
    another ~25 MB on top of that!), and essentially never used by
    regular user code. Also, as precedent, the official NuGet
    packages, the official manylinux images, and multiple Linux
    distributions all leave it out, and this hasn't caused any major
    problems.

    So this seems like the best way to balance broad compatibility
    with reasonable download/install sizes.

-   I'm not shipping any .pyc files. They take up space in the
    download, can be generated on the final system at minimal cost,
    and dropping them removes a source of location-dependence. (.pyc
    files store the absolute path of the corresponding .py file and
    include it in tracebacks; but, pybis are relocatable, so the
    correct path isn't known until after install.)

Backwards Compatibility

No backwards compatibility considerations.

Security Implications

No security implications, beyond the fact that anyone who takes it
upon themselves to distribute binaries has to come up with a plan to
manage their security (e.g., whether they roll a new build after an
OpenSSL CVE drops). But collectively, we core Python folks are already
maintaining binary builds for all major platforms (macOS + Windows
through python.org, and Linux builds through the official manylinux
image), so even if we do start releasing official CPython builds on
PyPI it doesn't really raise any new security issues.

How to Teach This

This isn't targeted at end-users; their experience will simply be that
e.g. their pyenv or tox invocation magically gets faster and more
reliable (if those projects' maintainers decide to take advantage of
this PEP).

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.

PEP: 566
Title: Metadata for Python Software Packages 2.1
Author: Dustin Ingram
BDFL-Delegate: Daniel Holth
Discussions-To: distutils-sig@python.org
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 01-Dec-2017
Python-Version: 3.x
Post-History:
Replaces: 345
Resolution: https://mail.python.org/pipermail/distutils-sig/2018-February/032014.html

packaging:core-metadata

Abstract

This PEP describes the changes between versions 1.2 and 2.1 of the
core metadata specification for Python packages. Version 1.2 is
specified in PEP 345.

It also changes the canonical source for field specifications to the
Core Metadata Specification reference document, which includes the
specifics of the field names, and their semantics and usage.

Fields

The canonical source for the names and semantics of each of the
supported metadata fields is the Core Metadata Specification document.

Fields marked with "(Multiple use)" may be specified multiple times in
a single PKG-INFO file. Other fields may only occur once in a PKG-INFO
file. Fields marked with "(optional)" are not required to appear in a
valid PKG-INFO file; all other fields must be present.

New in Version 2.1

Description-Content-Type (optional)

A string stating the markup syntax (if any) used in the distribution's
description, so that tools can intelligently render the description.

Historically, tools like PyPI assume that a package's description is
formatted in reStructuredText (reST), and fall back on plain text if
the description is not valid reST.

The introduction of this field allows PyPI to support additional types
of markup syntax, and removes the need to make this assumption.

The full specification for this field is defined in the Core Metadata
Specification.
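
For example, a description written in GitHub-flavored Markdown would
be declared like this (an example value drawn from the Core Metadata
Specification):

    Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM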

Provides-Extra (optional, multiple use)

A string containing the name of an optional feature. Must be a valid
Python identifier. May be used to make a dependency conditional on
whether the optional feature has been requested.

The introduction of this field allows package installation tools (such
as pip) to determine which extras are provided by a given package, and
allows package publication tools (such as twine) to check for issues
with environment markers which use extras.

The full specification for this field is defined in the Core Metadata
Specification.
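
For example, a package with an optional PDF feature might publish (an
illustrative pairing, in the style of the Core Metadata
Specification's examples):

    Provides-Extra: pdf
    Requires-Dist: reportlab; extra == 'pdf'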

Changed in Version 2.1

Name

The specification for the format of this field is now identical to the
distribution name specification defined in PEP 508.

Description

In addition to the Description header field, the distribution's
description may instead be provided in the message body (i.e., after a
completely blank line following the headers, with no indentation or
other special formatting necessary).

Version Specifiers

Version numbering requirements and the semantics for specifying
comparisons between versions are defined in PEP 440. Direct references
as defined in PEP 440 are also permitted as an alternative to version
specifiers.

Following PEP 508, version specifiers no longer need to be surrounded
by parentheses in the fields Requires-Dist, Provides-Dist,
Obsoletes-Dist or Requires-External, so e.g. requests >= 2.8.1 is now
a valid value. The recommended format is without parentheses, but
tools parsing metadata should also be able to handle version
specifiers in parentheses. Further, public index servers MAY prohibit
strict version matching clauses or direct references in these fields.

Usage of version specifiers is otherwise unchanged from PEP 345.

Environment markers

An environment marker is a marker that can be added at the end of a
field after a semi-colon (";"), to add a condition about the execution
environment.

The environment marker format used to declare such a condition is
defined in the environment markers section of PEP 508.

Usage of environment markers is otherwise unchanged from PEP 345.
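
For example (an illustrative value using the PEP 508 marker syntax):

    Requires-Dist: pywin32 (>1.0); sys_platform == 'win32'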

JSON-compatible Metadata

It may be necessary to store metadata in a data structure which does
not allow for multiple repeated keys, such as JSON.

The canonical method to transform metadata fields into such a data
structure, sketched in code below, is as follows:

1.  The original key-value format should be read with
    email.parser.HeaderParser;
2.  All transformed keys should be reduced to lower case. Hyphens
    should be replaced with underscores, but otherwise should retain
    all other characters;
3.  The transformed value for any field marked with "(Multiple use)"
    should be a single list containing all the original values for the
    given key;
4.  The Keywords field should be converted to a list by splitting the
    original value on commas;
5.  The message body, if present, should be stored as the value of the
    description key;
6.  The result should be stored as a string-keyed dictionary.
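
A rough, non-normative sketch of that procedure (the set of
multiple-use field names is abridged here for illustration):

    import email.parser

    MULTIPLE_USE = {
        "classifier", "obsoletes_dist", "platform", "provides_dist",
        "provides_extra", "requires_dist", "requires_external",
        "supported_platform",
    }

    def metadata_to_json_compatible(pkg_info_text):
        msg = email.parser.HeaderParser().parsestr(pkg_info_text)
        result = {}
        for key in frozenset(msg.keys()):
            transformed = key.lower().replace("-", "_")
            if transformed in MULTIPLE_USE:
                result[transformed] = msg.get_all(key)
            else:
                result[transformed] = msg[key]
        if "keywords" in result:
            result["keywords"] = result["keywords"].split(",")
        # The message body, if present, becomes the description value.
        if msg.get_payload():
            result["description"] = msg.get_payload()
        return result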

Summary of Differences From PEP 345

-   Metadata-Version is now 2.1.
-   Fields are now specified via the Core Metadata Specification.
-   Added two new fields: Description-Content-Type and Provides-Extra.
-   Acceptable values for the Name field are now specified as per PEP
    508.
-   Added a canonical method of transformation into a JSON-compatible
    data structure.

References

This document specifies version 2.1 of the metadata format. Version
1.0 is specified in PEP 241. Version 1.1 is specified in PEP 314.
Version 1.2 is specified in PEP 345. Version 2.0, while not formally
accepted, was specified in PEP 426.

Copyright

This document has been placed in the public domain.

Acknowledgements

Thanks to Alyssa Coghlan and Thomas Kluyver for contributing to this
PEP.

PEP: 567
Title: Context Variables
Version: $Revision$
Last-Modified: $Date$
Author: Yury Selivanov
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 12-Dec-2017
Python-Version: 3.7
Post-History: 12-Dec-2017, 28-Dec-2017, 16-Jan-2018

Abstract

This PEP proposes a new contextvars module and a set of new CPython C
APIs to support context variables. This concept is similar to
thread-local storage (TLS), but, unlike TLS, it also allows correctly
keeping track of values per asynchronous task, e.g. asyncio.Task.

This proposal is a simplified version of PEP 550. The key difference
is that this PEP is concerned only with solving the case for
asynchronous tasks, not for generators. There are no proposed
modifications to any built-in types or to the interpreter.

This proposal is not strictly related to Python Context Managers,
although it does provide a mechanism that can be used by Context
Managers to store their state.

API Design and Implementation Revisions

In Python 3.7.1 the signatures of all context variables C APIs were
changed to use PyObject * pointers instead of PyContext *,
PyContextVar *, and PyContextToken *, e.g.:

    // in 3.7.0:
    PyContext *PyContext_New(void);

    // in 3.7.1+:
    PyObject *PyContext_New(void);

See[1] for more details. The C API section of this PEP was updated to
reflect the change.

Rationale

Thread-local variables are insufficient for asynchronous tasks that
execute concurrently in the same OS thread. Any context manager that
saves and restores a context value using threading.local() will have
its context values bleed to other code unexpectedly when used in
async/await code.

A few examples where having a working context local storage for
asynchronous code is desirable:

-   Context managers like decimal contexts and numpy.errstate.
-   Request-related data, such as security tokens and request data in
    web applications, language context for gettext, etc.
-   Profiling, tracing, and logging in large code bases.

Introduction

The PEP proposes a new mechanism for managing context variables. The
key classes involved in this mechanism are contextvars.Context and
contextvars.ContextVar. The PEP also proposes some policies for using
the mechanism around asynchronous tasks.

The proposed mechanism for accessing context variables uses the
ContextVar class. A module (such as decimal) that wishes to use the
new mechanism should:

-   declare a module-global variable holding a ContextVar to serve as
    a key;
-   access the current value via the get() method on the key variable;
-   modify the current value via the set() method on the key variable.

The notion of "current value" deserves special consideration:
different asynchronous tasks that exist and execute concurrently may
have different values for the same key. This idea is well known from
thread-local storage but in this case the locality of the value is not
necessarily bound to a thread. Instead, there is the notion of the
"current Context" which is stored in thread-local storage.
Manipulation of the current context is the responsibility of the task
framework, e.g. asyncio.

A Context is a mapping of ContextVar objects to their values. The
Context itself exposes the abc.Mapping interface (not
abc.MutableMapping!), so it cannot be modified directly. To set a new
value for a context variable in a Context object, the user needs to:

-   make the Context object "current" using the Context.run() method;
-   use ContextVar.set() to set a new value for the context variable.

The ContextVar.get() method looks for the variable in the current
Context object using self as a key.

It is not possible to get a direct reference to the current Context
object, but it is possible to obtain a shallow copy of it using the
contextvars.copy_context() function. This ensures that the caller of
Context.run() is the sole owner of its Context object.

Specification

A new standard library module contextvars is added with the following
APIs:

1.  The copy_context() -> Context function is used to get a copy of
    the current Context object for the current OS thread.
2.  The ContextVar class to declare and access context variables.
3.  The Context class encapsulates context state. Every OS thread
    stores a reference to its current Context instance. It is not
    possible to control that reference directly. Instead, the
    Context.run(callable, *args, **kwargs) method is used to run
    Python code in another context.

contextvars.ContextVar

The ContextVar class has the following constructor signature:
ContextVar(name, *, default=_NO_DEFAULT). The name parameter is used
for introspection and debug purposes, and is exposed as a read-only
ContextVar.name attribute. The default parameter is optional. Example:

    # Declare a context variable 'var' with the default value 42.
    var = ContextVar('var', default=42)

(The _NO_DEFAULT is an internal sentinel object used to detect if the
default value was provided.)

ContextVar.get(default=_NO_DEFAULT) returns a value for the context
variable for the current Context:

    # Get the value of `var`.
    var.get()

If there is no value for the variable in the current context,
ContextVar.get() will:

-   return the value of the default argument of the get() method, if
    provided; or
-   return the default value for the context variable, if provided; or
-   raise a LookupError.
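
To make that fallback order concrete (the no_default variable is
invented for this example; var is the variable with default 42
declared above):

    no_default = ContextVar('no_default')

    no_default.get('spam')  # -> 'spam' (the method's default argument)
    var.get()               # -> 42 (the variable's default)
    no_default.get()        # raises LookupError (no value, no defaults)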

ContextVar.set(value) -> Token is used to set a new value for the
context variable in the current Context:

    # Set the variable 'var' to 1 in the current context.
    var.set(1)

ContextVar.reset(token) is used to reset the variable in the current
context to the value it had before the set() operation that created
the token (or to remove the variable if it was not set):

    # Assume: var.get(None) is None

    # Set 'var' to 1:
    token = var.set(1)
    try:
        # var.get() == 1
    finally:
        var.reset(token)

    # After reset: var.get(None) is None,
    # i.e. 'var' was removed from the current context.

The ContextVar.reset() method raises:

-   a ValueError if it is called with a token object created by
    another variable;
-   a ValueError if the current Context object does not match the one
    where the token object was created;
-   a RuntimeError if the token object has already been used once to
    reset the variable.

contextvars.Token

contextvars.Token is an opaque object that should be used to restore
the ContextVar to its previous value, or to remove it from the context
if the variable was not set before. It can be created only by calling
ContextVar.set().

For debug and introspection purposes it has:

-   a read-only attribute Token.var pointing to the variable that
    created the token;
-   a read-only attribute Token.old_value set to the value the
    variable had before the set() call, or to Token.MISSING if the
    variable wasn't set before.

contextvars.Context

Context object is a mapping of context variables to values.

Context() creates an empty context. To get a copy of the current
Context for the current OS thread, use the contextvars.copy_context()
method:

    ctx = contextvars.copy_context()

To run Python code in some Context, use the Context.run() method:

    ctx.run(function)

Any changes to any context variables that function causes will be
contained in the ctx context:

    var = ContextVar('var')
    var.set('spam')

    def main():
        # 'var' was set to 'spam' before
        # calling 'copy_context()' and 'ctx.run(main)', so:
        # var.get() == ctx[var] == 'spam'

        var.set('ham')

        # Now, after setting 'var' to 'ham':
        # var.get() == ctx[var] == 'ham'

    ctx = copy_context()

    # Any changes that the 'main' function makes to 'var'
    # will be contained in 'ctx'.
    ctx.run(main)

    # The 'main()' function was run in the 'ctx' context,
    # so changes to 'var' are contained in it:
    # ctx[var] == 'ham'

    # However, outside of 'ctx', 'var' is still set to 'spam':
    # var.get() == 'spam'

Context.run() raises a RuntimeError when called on the same context
object from more than one OS thread, or when called recursively.

Context.copy() returns a shallow copy of the context object.

Context objects implement the collections.abc.Mapping ABC. This can be
used to introspect contexts:

    ctx = contextvars.copy_context()

    # Print all context variables and their values in 'ctx':
    print(ctx.items())

    # Print the value of 'some_variable' in context 'ctx':
    print(ctx[some_variable])

Note that all Mapping methods, including Context.__getitem__ and
Context.get, ignore default values for context variables (i.e.
ContextVar.default). This means that for a variable var that was
created with a default value and was not set in the context:

-   context[var] raises a KeyError,
-   var in context returns False,
-   the variable isn't included in context.items(), etc.

asyncio

asyncio uses Loop.call_soon(), Loop.call_later(), and Loop.call_at()
to schedule the asynchronous execution of a function. asyncio.Task
uses call_soon() to run the wrapped coroutine.

We modify Loop.call_{at,later,soon} and Future.add_done_callback() to
accept the new optional context keyword-only argument, which defaults
to the current context:

    def call_soon(self, callback, *args, context=None):
        if context is None:
            context = contextvars.copy_context()

        # ... some time later
        context.run(callback, *args)

Tasks in asyncio need to maintain their own context that they inherit
from the point they were created at. asyncio.Task is modified as
follows:

    class Task:
        def __init__(self, coro):
            ...
            # Get the current context snapshot.
            self._context = contextvars.copy_context()
            self._loop.call_soon(self._step, context=self._context)

        def _step(self, exc=None):
            ...
            # Every advance of the wrapped coroutine is done in
            # the task's context.
            self._loop.call_soon(self._step, context=self._context)
            ...
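
As a non-normative illustration of the resulting behavior (the
request_id variable and handler names are invented for this example),
each task sees the values that were current when it was created:

    import asyncio
    from contextvars import ContextVar

    request_id = ContextVar('request_id')

    async def handler():
        # Sees the snapshot taken when its task was created:
        print(request_id.get())

    async def main():
        request_id.set('req-1')
        task1 = asyncio.ensure_future(handler())
        request_id.set('req-2')
        task2 = asyncio.ensure_future(handler())
        await asyncio.gather(task1, task2)  # prints 'req-1', then 'req-2'

    asyncio.get_event_loop().run_until_complete(main())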

Implementation

This section explains high-level implementation details in
pseudo-code. Some optimizations are omitted to keep this section short
and clear.

The Context mapping is implemented using an immutable dictionary. This
allows for an O(1) implementation of the copy_context() function. The
reference implementation implements the immutable dictionary using
Hash Array Mapped Tries (HAMT); see PEP 550 for an analysis of HAMT
performance[2].

For the purposes of this section, we implement an immutable dictionary
using a copy-on-write approach and the built-in dict type:

    class _ContextData:

        def __init__(self):
            self._mapping = dict()

        def __getitem__(self, key):
            return self._mapping[key]

        def __contains__(self, key):
            return key in self._mapping

        def __len__(self):
            return len(self._mapping)

        def __iter__(self):
            return iter(self._mapping)

        def set(self, key, value):
            copy = _ContextData()
            copy._mapping = self._mapping.copy()
            copy._mapping[key] = value
            return copy

        def delete(self, key):
            copy = _ContextData()
            copy._mapping = self._mapping.copy()
            del copy._mapping[key]
            return copy

Every OS thread has a reference to the current Context object:

    class PyThreadState:
        context: Context

contextvars.Context is a wrapper around _ContextData:

    class Context(collections.abc.Mapping):

        _data: _ContextData
        _prev_context: Optional[Context]

        def __init__(self):
            self._data = _ContextData()
            self._prev_context = None

        def run(self, callable, *args, **kwargs):
            if self._prev_context is not None:
                raise RuntimeError(
                    f'cannot enter context: {self} is already entered')

            ts: PyThreadState = PyThreadState_Get()
            self._prev_context = ts.context
            try:
                ts.context = self
                return callable(*args, **kwargs)
            finally:
                ts.context = self._prev_context
                self._prev_context = None

        def copy(self):
            new = Context()
            new._data = self._data
            return new

        # Implement abstract Mapping.__getitem__
        def __getitem__(self, var):
            return self._data[var]

        # Implement abstract Mapping.__contains__
        def __contains__(self, var):
            return var in self._data

        # Implement abstract Mapping.__len__
        def __len__(self):
            return len(self._data)

        # Implement abstract Mapping.__iter__
        def __iter__(self):
            return iter(self._data)

        # The rest of the Mapping methods are implemented
        # by collections.abc.Mapping.

contextvars.copy_context() is implemented as follows:

    def copy_context():
        ts: PyThreadState = PyThreadState_Get()
        return ts.context.copy()

contextvars.ContextVar interacts with PyThreadState.context directly:

    class ContextVar:

        def __init__(self, name, *, default=_NO_DEFAULT):
            self._name = name
            self._default = default

        @property
        def name(self):
            return self._name

        def get(self, default=_NO_DEFAULT):
            ts: PyThreadState = PyThreadState_Get()
            try:
                return ts.context[self]
            except KeyError:
                pass

            if default is not _NO_DEFAULT:
                return default

            if self._default is not _NO_DEFAULT:
                return self._default

            raise LookupError

        def set(self, value):
            ts: PyThreadState = PyThreadState_Get()

            data: _ContextData = ts.context._data
            try:
                old_value = data[self]
            except KeyError:
                old_value = Token.MISSING

            updated_data = data.set(self, value)
            ts.context._data = updated_data
            return Token(ts.context, self, old_value)

        def reset(self, token):
            if token._used:
                raise RuntimeError("Token has already been used once")

            if token._var is not self:
                raise ValueError(
                    "Token was created by a different ContextVar")

            ts: PyThreadState = PyThreadState_Get()
            if token._context is not ts.context:
                raise ValueError(
                    "Token was created in a different Context")

            if token._old_value is Token.MISSING:
                ts.context._data = ts.context._data.delete(token._var)
            else:
                ts.context._data = ts.context._data.set(
                    token._var, token._old_value)

            token._used = True

Note that in the reference implementation, ContextVar.get() has an
internal cache for the most recent value, which allows it to bypass a
hash lookup. This is similar to the optimization the decimal module
implements to retrieve its context from PyThreadState_GetDict(). See
PEP 550, which explains the implementation of the cache in great
detail.

The Token class is implemented as follows:

    class Token:

        MISSING = object()

        def __init__(self, context, var, old_value):
            self._context = context
            self._var = var
            self._old_value = old_value
            self._used = False

        @property
        def var(self):
            return self._var

        @property
        def old_value(self):
            return self._old_value

Summary of the New APIs

Python API

1.  A new contextvars module with ContextVar, Context, and Token
    classes, and a copy_context() function.
2.  asyncio.Loop.call_at(), asyncio.Loop.call_later(),
    asyncio.Loop.call_soon(), and asyncio.Future.add_done_callback()
    run callback functions in the context they were called in. A new
    context keyword-only parameter can be used to specify a custom
    context.
3.  asyncio.Task is modified internally to maintain its own context.

C API

1.  PyObject * PyContextVar_New(char *name, PyObject *default): create
    a ContextVar object. The default argument can be NULL, which means
    that the variable has no default value.

2.  int PyContextVar_Get(PyObject *, PyObject *default_value, PyObject **value):
    return -1 if an error occurs during the lookup, 0 otherwise. If a
    value for the context variable is found, it will be set to the
    value pointer. Otherwise, value will be set to default_value when
    it is not NULL. If default_value is NULL, value will be set to the
    default value of the variable, which can be NULL too. value is
    always a new reference.

3.  PyObject * PyContextVar_Set(PyObject *, PyObject *): set the value
    of the variable in the current context.

4.  int PyContextVar_Reset(PyObject *, PyObject *): reset the value of
    the context variable.

5.  PyObject * PyContext_New(): create a new empty context.

6.  PyObject * PyContext_Copy(PyObject *): return a shallow copy of
    the passed context object.

7.  PyObject * PyContext_CopyCurrent(): get a copy of the current
    context.

8.  int PyContext_Enter(PyObject *) and int PyContext_Exit(PyObject *)
    allow setting and restoring the context for the current OS thread.
    It is required to always restore the previous context:

        PyObject *old_ctx = PyContext_Copy();
        if (old_ctx == NULL) goto error;

        if (PyContext_Enter(new_ctx)) goto error;

        // run some code

        if (PyContext_Exit(old_ctx)) goto error;

Rejected Ideas

Replicating threading.local() interface

Please refer to PEP 550 where this topic is covered in detail:[3].

Replacing Token with ContextVar.unset()

The Token API makes it possible to get by without a ContextVar.unset()
method, which would be incompatible with the chained-contexts design
of PEP 550. Future compatibility with PEP 550 is desired in case there
is demand to support context variables in generators and asynchronous
generators.

The Token API also offers better usability: the user does not have to
special-case absence of a value. Compare:

    token = cv.set(new_value)
    try:
        # cv.get() is new_value
    finally:
        cv.reset(token)

with:

    _deleted = object()
    old = cv.get(default=_deleted)
    try:
        cv.set(blah)
        # code
    finally:
        if old is _deleted:
            cv.unset()
        else:
            cv.set(old)

Having Token.reset() instead of ContextVar.reset()

Nathaniel Smith suggested to implement the ContextVar.reset() method
directly on the Token class, so instead of:

    token = var.set(value)
    # ...
    var.reset(token)

we would write:

    token = var.set(value)
    # ...
    token.reset()

Having Token.reset() would make it impossible for a user to attempt to
reset a variable with a token object created by another variable.

This proposal was rejected because ContextVar.reset() makes it clearer
to the human reader of the code which variable is being reset.

Making Context objects picklable

Proposed by Antoine Pitrou, this could enable transparent
cross-process use of Context objects, so the Offloading execution to
other threads example would work with a ProcessPoolExecutor too.

Enabling this is problematic for the following reasons:

1.  ContextVar objects do not have __module__ and __qualname__
    attributes, making straightforward pickling of Context objects
    impossible. This is solvable by modifying the API to either
    auto-detect the module where a context variable is defined, or by
    adding a new keyword-only "module" parameter to the ContextVar
    constructor.
2.  Not all context variables refer to picklable objects. Making a
    ContextVar picklable must be an opt-in.

Given the time frame of the Python 3.7 release schedule it was decided
to defer this proposal to Python 3.8.

Making Context a MutableMapping

Making the Context class implement the abc.MutableMapping interface
would mean that it is possible to set and unset variables using
Context[var] = value and del Context[var] operations.

This proposal was deferred to Python 3.8+ because of the following:

1.  If in Python 3.8 it is decided that generators should support
    context variables (see PEP 550 and PEP 568), then Context would be
    transformed into a chain-map of context variables mappings (as
    every generator would have its own mapping). That would make
    mutation operations like Context.__delitem__ confusing, as they
    would operate only on the topmost mapping of the chain.

2.  Having a single way of mutating the context (ContextVar.set() and
    ContextVar.reset() methods) makes the API more straightforward.

    For example, it would be non-obvious why the below code fragment
    does not work as expected:

        var = ContextVar('var')

        ctx = copy_context()
        ctx[var] = 'value'
        print(ctx[var])  # Prints 'value'

        print(var.get())  # Raises a LookupError

    While the following code would work:

        ctx = copy_context()

        def func():
            ctx[var] = 'value'

            # Contrary to the previous example, this would work
            # because 'func()' is running within 'ctx'.
            print(ctx[var])
            print(var.get())

        ctx.run(func)

3.  If Context was mutable it would mean that context variables could
    be mutated separately (or concurrently) from the code that runs
    within the context. That would be similar to obtaining a reference
    to a running Python frame object and modifying its f_locals from
    another OS thread. Having one single way to assign values to
    context variables makes contexts conceptually simpler and more
    predictable, while keeping the door open for future performance
    optimizations.

Having initial values for ContextVars

Nathaniel Smith proposed to have a required initial_value keyword-only
argument for the ContextVar constructor.

The main argument against this proposal is that for some types there
is simply no sensible "initial value" except None. E.g. consider a web
framework that stores the current HTTP request object in a context
variable. With the current semantics it is possible to create a
context variable without a default value:

    # Framework:
    current_request: ContextVar[Request] = \
        ContextVar('current_request')


    # Later, while handling an HTTP request:
    request: Request = current_request.get()

    # Work with the 'request' object:
    return request.method

Note that in the above example there is no need to check if request is
None. It is simply expected that the framework always sets the
current_request variable, or it is a bug (in which case
current_request.get() would raise a LookupError).

If, however, we had a required initial value, we would have to guard
against None values explicitly:

    # Framework:
    current_request: ContextVar[Optional[Request]] = \
        ContextVar('current_request', initial_value=None)


    # Later, while handling an HTTP request:
    request: Optional[Request] = current_request.get()

    # Check if the current request object was set:
    if request is None:
        raise RuntimeError

    # Work with the 'request' object:
    return request.method

Moreover, we can loosely compare context variables to regular Python
variables and to threading.local() objects. Both of them raise errors
on failed lookups (NameError and AttributeError respectively).

Backwards Compatibility

This proposal preserves 100% backwards compatibility.

Libraries that use threading.local() to store context-related values
currently work correctly only for synchronous code. Switching them to
use the proposed API will keep their behavior for synchronous code
unmodified, but will automatically enable support for asynchronous
code.

Examples

Converting code that uses threading.local()

A typical code fragment that uses threading.local() usually looks like
the following:

    class PrecisionStorage(threading.local):
        # Subclass threading.local to specify a default value.
        value = 0.0

    precision = PrecisionStorage()

    # To set a new precision:
    precision.value = 0.5

    # To read the current precision:
    print(precision.value)

Such code can be converted to use the contextvars module:

    precision = contextvars.ContextVar('precision', default=0.0)

    # To set a new precision:
    precision.set(0.5)

    # To read the current precision:
    print(precision.get())

Offloading execution to other threads

It is possible to run code in a separate OS thread using a copy of the
current thread context:

    executor = ThreadPoolExecutor()
    current_context = contextvars.copy_context()

    executor.submit(current_context.run, some_function)

Reference Implementation

The reference implementation can be found here:[4]. See also issue
32436[5].

Acceptance

PEP 567 was accepted by Guido on Monday, January 22, 2018[6].
The reference implementation was merged on the same day.

References

Acknowledgments

I thank Guido van Rossum, Nathaniel Smith, Victor Stinner, Elvis
Pranskevichus, Alyssa Coghlan, Antoine Pitrou, INADA Naoki, Paul
Moore, Eric Snow, Greg Ewing, and many others for their feedback,
ideas, edits, criticism, code reviews, and discussions around this
PEP.

Copyright

This document has been placed in the public domain.

[1] https://bugs.python.org/issue34762

[2] 550#appendix-hamt-performance-analysis

[3] 550#replication-of-threading-local-interface

[4] https://github.com/python/cpython/pull/5027

[5] https://bugs.python.org/issue32436

[6] https://mail.python.org/pipermail/python-dev/2018-January/151878.html

PEP: 8016
Title: The Steering Council Model
Author: Nathaniel J. Smith, Donald Stufft
Status: Accepted
Type: Informational
Topic: Governance
Content-Type: text/x-rst
Created: 01-Nov-2018

Note

This PEP is retained for historical purposes, but the official
governance document is now PEP 13.

Abstract

This PEP proposes a model of Python governance based around a steering
council. The council has broad authority, which they seek to exercise
as rarely as possible; instead, they use this power to establish
standard processes, like those proposed in the other 801x-series PEPs.
This follows the general philosophy that it's better to split up large
changes into a series of small changes that can be reviewed
independently: instead of trying to do everything in one PEP, we focus
on providing a minimal-but-solid foundation for further governance
decisions.

PEP Acceptance

PEP 8016 was accepted by a core developer vote described in PEP 8001
on Monday, December 17, 2018.

Rationale

The main goals of this proposal are:

-   Be boring: We're not experts in governance, and we don't think
    Python is a good place to experiment with new and untried
    governance models. So this proposal sticks to mature, well-known,
    previously tested processes as much as possible. The high-level
    approach of a mostly-hands-off council is arguably the most common
    across large successful F/OSS projects, and low-level details are
    derived directly from Django's governance.
-   Be simple: We've attempted to pare things down to the minimum
    needed to make this workable: the council, the core team (who
    elect the council), and the process for changing the document. The
    goal is Minimum Viable Governance.
-   Be comprehensive: But for the things we need to define, we've
    tried to make sure to cover all the bases, because we don't want
    to go through this kind of crisis again. Having a clear and
    unambiguous set of rules also helps minimize confusion and
    resentment.
-   Be flexible and light-weight: We know that it will take time and
    experimentation to find the best processes for working together.
    By keeping this document as minimal as possible, we keep maximal
    flexibility for adjusting things later, while minimizing the need
    for heavy-weight and anxiety-provoking processes like
    whole-project votes.

A number of details were discussed in this Discourse thread, and this
follow-up thread has further discussion. These may be useful to anyone
trying to understand the rationale for various minor decisions.

Specification

The steering council

Composition

The steering council is a 5-person committee.

Mandate

The steering council shall work to:

-   Maintain the quality and stability of the Python language and
    CPython interpreter,
-   Make contributing as accessible, inclusive, and sustainable as
    possible,
-   Formalize and maintain the relationship between the core team and
    the PSF,
-   Establish appropriate decision-making processes for PEPs,
-   Seek consensus among contributors and the core team before acting
    in a formal capacity,
-   Act as a "court of final appeal" for decisions where all other
    methods have failed.

Powers

The council has broad authority to make decisions about the project.
For example, they can:

-   Accept or reject PEPs
-   Enforce or update the project's code of conduct
-   Work with the PSF to manage any project assets
-   Delegate parts of their authority to other subcommittees or
    processes

However, they cannot modify this PEP, or affect the membership of the
core team, except via the mechanisms specified in this PEP.

The council should look for ways to use these powers as little as
possible. Instead of voting, it's better to seek consensus. Instead of
ruling on individual PEPs, it's better to define a standard process
for PEP decision making (for example, by accepting one of the other
801x series of PEPs). It's better to establish a Code of Conduct
committee than to rule on individual cases. And so on.

To use its powers, the council votes. Every council member must either
vote or explicitly abstain. Members with conflicts of interest on a
particular vote must abstain. Passing requires support from a majority
of non-abstaining council members.

Whenever possible, the council's deliberations and votes shall be held
in public.

Electing the council

A council election consists of two phases:

-   Phase 1: Candidates advertise their interest in serving.
    Candidates must be nominated by a core team member.
    Self-nominations are allowed.
-   Phase 2: Each core team member can vote for zero to five of the
    candidates. Voting is performed anonymously. Candidates are ranked
    by the total number of votes they receive. If a tie occurs, it may
    be resolved by mutual agreement among the candidates, or else the
    winner will be chosen at random.

Each phase lasts one to two weeks, at the outgoing council's
discretion. For the initial election, both phases will last two weeks.

The election process is managed by a returns officer nominated by the
outgoing steering council. For the initial election, the returns
officer will be nominated by the PSF Executive Director.

The council should ideally reflect the diversity of Python
contributors and users, and core team members are encouraged to vote
accordingly.

Term

A new council is elected after each feature release. Each council's
term runs from when their election results are finalized until the
next council's term starts. There are no term limits.

Vacancies

Council members may resign their position at any time.

Whenever there is a vacancy during the regular council term, the
council may vote to appoint a replacement to serve out the rest of the
term.

If a council member drops out of touch and cannot be contacted for a
month or longer, then the rest of the council may vote to replace
them.

Conflicts of interest

While we trust council members to act in the best interests of Python
rather than themselves or their employers, the mere appearance of any
one company dominating Python development could itself be harmful and
erode trust. In order to avoid any appearance of conflict of interest,
at most 2 members of the council can work for any single employer.

In a council election, if 3 of the top 5 vote-getters work for the
same employer, then whichever of them ranked lowest is disqualified
and the 6th-ranking candidate moves up into 5th place; this is
repeated until a valid council is formed.

During a council term, if changing circumstances cause this rule to be
broken (for instance, due to a council member changing employment),
then one or more council members must resign to remedy the issue, and
the resulting vacancies can then be filled as normal.

Ejecting core team members

In exceptional circumstances, it may be necessary to remove someone
from the core team against their will. (For example: egregious and
ongoing code of conduct violations.) This can be accomplished by a
steering council vote, but unlike other steering council votes, this
requires at least a two-thirds majority. With 5 members voting, this
means that a 3:2 vote is insufficient; 4:1 in favor is the minimum
required for such a vote to succeed. In addition, this is the one
power of the steering council which cannot be delegated, and this
power cannot be used while a vote of no confidence is in process.

If the ejected core team member is also on the steering council, then
they are removed from the steering council as well.

Vote of no confidence

In exceptional circumstances, the core team may remove a sitting
council member, or the entire council, via a vote of no confidence.

A no-confidence vote is triggered when a core team member calls for
one publicly on an appropriate project communication channel, and
another core team member seconds the proposal.

The vote lasts for two weeks. Core team members vote for or against.
If at least two thirds of voters express a lack of confidence, then
the vote succeeds.

There are two forms of no-confidence votes: those targeting a single
member, and those targeting the council as a whole. The initial call
for a no-confidence vote must specify which type is intended. If a
single-member vote succeeds, then that member is removed from the
council and the resulting vacancy can be handled in the usual way. If
a whole-council vote succeeds, the council is dissolved and a new
council election is triggered immediately.
They make the decisions that\nshape the future of the project.\n\nCore team members are expected to act as role models for the community\nand custodians of the project, on behalf of the community and all those\nwho rely on Python.\n\nThey will intervene, where necessary, in online discussions or at\nofficial Python events on the rare occasions that a situation arises\nthat requires intervention.\n\nThey have authority over the Python Project infrastructure, including\nthe Python Project website itself, the Python GitHub organization and\nrepositories, the bug tracker, the mailing lists, IRC channels, etc.\n\nPrerogatives\n\nCore team members may participate in formal votes, typically to nominate\nnew team members and to elect the steering council.\n\nMembership\n\nPython core team members demonstrate:\n\n- a good grasp of the philosophy of the Python Project\n- a solid track record of being constructive and helpful\n- significant contributions to the project's goals, in any form\n- willingness to dedicate some time to improving Python\n\nAs the project matures, contributions go beyond code. Here's an\nincomplete list of areas where contributions may be considered for\njoining the core team, in no particular order:\n\n- Working on community management and outreach\n- Providing support on the mailing lists and on IRC\n- Triaging tickets\n- Writing patches (code, docs, or tests)\n- Reviewing patches (code, docs, or tests)\n- Participating in design decisions\n- Providing expertise in a particular domain (security, i18n, etc.)\n- Managing the continuous integration infrastructure\n- Managing the servers (website, tracker, documentation, etc.)\n- Maintaining related projects (alternative interpreters, core\n infrastructure like packaging, etc.)\n- Creating visual designs\n\nCore team membership acknowledges sustained and valuable efforts that\nalign well with the philosophy and the goals of the Python project.\n\nIt is granted by receiving at least two-thirds positive votes in a core\nteam vote and no veto by the steering council.\n\nCore team members are always looking for promising contributors,\nteaching them how the project is managed, and submitting their names to\nthe core team's vote when they're ready.\n\nThere's no time limit on core team membership. However, in order to\nprovide the general public with a reasonable idea of how many people\nmaintain Python, core team members who have stopped contributing are\nencouraged to declare themselves as \"inactive\". Those who haven't made\nany non-trivial contribution in two years may be asked to move\nthemselves to this category, and moved there if they don't respond. To\nrecord and honor their contributions, inactive team members will\ncontinue to be listed alongside active core team members; and, if they\nlater resume contributing, they can switch back to active status at\nwill. 
While someone is in inactive status, though, they lose their\nactive privileges like voting or nominating for the steering council,\nand commit access.\n\nThe initial active core team members will consist of everyone currently\nlisted in the \"Python core\" team on GitHub, and the initial inactive\nmembers will consist of everyone else who has been a committer in the\npast.\n\nChanging this document\n\nChanges to this document require at least a two-thirds majority of votes\ncast in a core team vote.\n\nTODO\n\n- Lots of people contributed helpful suggestions and feedback; we\n should check if they're comfortable being added as co-authors\n- It looks like Aymeric Augustin wrote the whole Django doc, so\n presumably holds copyright; maybe we should ask him if he's willing\n to release it into the public domain so our copyright statement\n below can be simpler.\n\nAcknowledgements\n\nSubstantial text was copied shamelessly from The Django project's\ngovernance document.\n\nCopyright\n\nText copied from Django used under their license. The rest of this\ndocument has been placed in the public domain."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.284777"},"created":{"kind":"timestamp","value":"2018-11-01T00:00:00","string":"2018-11-01T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-8016/\",\n \"authors\": [\n \"Donald Stufft\",\n \"Nathaniel J. Smith\"\n ],\n \"pep_number\": \"8016\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":559,"cells":{"id":{"kind":"string","value":"0314"},"text":{"kind":"string","value":"PEP: 314 Title: Metadata for Python Software Packages 1.1 Author: A.M.\nKuchling, Richard Jones Discussions-To: distutils-sig@python.org Status:\nSuperseded Type: Standards Track Topic: Packaging Content-Type:\ntext/x-rst Created: 12-Apr-2003 Python-Version: 2.5 Post-History:\n29-Apr-2003 Replaces: 241 Superseded-By: 345\n\npackaging:core-metadata\n\nIntroduction\n\nThis PEP describes a mechanism for adding metadata to Python packages.\nIt includes specifics of the field names, and their semantics and usage.\n\nThis document specifies version 1.1 of the metadata format. Version 1.0\nis specified in PEP 241.\n\nIncluding Metadata in Packages\n\nThe Distutils sdist command will extract the metadata fields from the\narguments and write them to a file in the generated zipfile or tarball.\nThis file will be named PKG-INFO and will be placed in the top directory\nof the source distribution (where the README, INSTALL, and other files\nusually go).\n\nDevelopers may not provide their own PKG-INFO file. The sdist command\nwill, if it detects an existing PKG-INFO file, terminate with an\nappropriate error message. This should prevent confusion caused by the\nPKG-INFO and setup.py files being out of sync.\n\nThe PKG-INFO file format is a single set of 822 headers parseable by the\nrfc822.py module. The field names listed in the following section are\nused as the header names.\n\nFields\n\nThis section specifies the names and semantics of each of the supported\nmetadata fields.\n\nFields marked with \"(Multiple use)\" may be specified multiple times in a\nsingle PKG-INFO file. Other fields may only occur once in a PKG-INFO\nfile. 
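
For illustration only (this snippet is not part of the PEP), such a
file can be read back with that module:

    import rfc822

    f = open("PKG-INFO")
    msg = rfc822.Message(f)
    metadata_version = msg["Metadata-Version"]   # single-use field
    classifiers = msg.getheaders("Classifier")   # multiple-use field
    f.close()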

Fields

This section specifies the names and semantics of each of the
supported metadata fields.

Fields marked with "(Multiple use)" may be specified multiple times in
a single PKG-INFO file. Other fields may only occur once in a PKG-INFO
file. Fields marked with "(optional)" are not required to appear in a
valid PKG-INFO file; all other fields must be present.

Metadata-Version

Version of the file format; currently "1.0" and "1.1" are the only
legal values here.

Example:

    Metadata-Version: 1.1

Name

The name of the package.

Example:

    Name: BeagleVote

Version

A string containing the package's version number. This field should be
parseable by one of the Version classes (StrictVersion or LooseVersion)
in the distutils.version module.

Example:

    Version: 1.0a2
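
For instance, the pre-release number shown above parses and compares
as expected (a small illustrative check, not part of the PEP):

    from distutils.version import StrictVersion

    # "1.0a2" is a valid version with a pre-release tag, and it sorts
    # before the final "1.0" release:
    assert StrictVersion("1.0a2") < StrictVersion("1.0")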
License

Text indicating the license covering the package where the license is
not a selection from the "License" Trove classifiers. See "Classifier"
below.

Example:

    License: This software may only be obtained by sending the
             author a postcard, and then the user promises not
             to redistribute it.

Classifier (multiple use)

Each entry is a string giving a single classification value for the
package. Classifiers are described in PEP 301.

Examples:

    Classifier: Development Status :: 4 - Beta
    Classifier: Environment :: Console (Text Based)

Requires (multiple use)

Each entry contains a string describing some other module or package
required by this package.

The format of a requirement string is identical to that of a module or
package name usable with the 'import' statement, optionally followed by
a version declaration within parentheses.

A version declaration is a series of conditional operators and version
numbers, separated by commas. Conditional operators must be one of "<",
">", "<=", ">=", "==", and "!=". Version numbers must be in the format
accepted by the distutils.version.StrictVersion class: two or three
dot-separated numeric components, with an optional "pre-release" tag on
the end consisting of the letter 'a' or 'b' followed by a number.
Example version numbers are "1.0", "2.3a2", and "1.3.99".

Any number of conditional operators can be specified, e.g. the string
">1.0, !=1.3.4, <2.0" is a legal version declaration.

All of the following are possible requirement strings: "rfc822", "zlib
(>=1.1.4)", "zope".

There's no canonical list of what strings should be used; the Python
community is left to choose its own standards.

Example:

    Requires: re
    Requires: sys
    Requires: zlib
    Requires: xml.parsers.expat (>1.0)
    Requires: psycopg
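The clauses of a version declaration are simply ANDed together. A
minimal sketch of checking one (the satisfies helper is illustrative; it
uses distutils.version.StrictVersion as specified above, a module that
was later deprecated and removed in recent Pythons):

    import operator
    from distutils.version import StrictVersion

    OPS = {'<': operator.lt, '>': operator.gt, '<=': operator.le,
           '>=': operator.ge, '==': operator.eq, '!=': operator.ne}

    def satisfies(version, declaration):
        """Check a version against a declaration like '>1.0, !=1.3.4, <2.0'."""
        candidate = StrictVersion(version)
        for clause in declaration.split(','):
            clause = clause.strip()
            # Try two-character operators before one-character ones.
            for symbol in ('<=', '>=', '==', '!=', '<', '>'):
                if clause.startswith(symbol):
                    required = StrictVersion(clause[len(symbol):].strip())
                    if not OPS[symbol](candidate, required):
                        return False
                    break
        return True

    assert satisfies('1.5', '>1.0, !=1.3.4, <2.0')
    assert not satisfies('1.3.4', '>1.0, !=1.3.4, <2.0')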
Provides (multiple use)

Each entry contains a string describing a package or module that will be
provided by this package once it is installed. These strings should
match the ones used in Requires fields. A version declaration may be
supplied (without a comparison operator); the package's version number
will be implied if none is specified.

Example:

    Provides: xml
    Provides: xml.utils
    Provides: xml.utils.iso8601
    Provides: xml.dom
    Provides: xmltools (1.3)

Obsoletes (multiple use)

Each entry contains a string describing a package or module that this
package renders obsolete, meaning that the two packages should not be
installed at the same time. Version declarations can be supplied.

The most common use of this field will be in case a package name
changes, e.g. Gorgon 2.3 gets subsumed into Torqued Python 1.0. When you
install Torqued Python, the Gorgon package should be removed.

Example:

    Obsoletes: Gorgon

Summary of Differences From PEP 241

- Metadata-Version is now 1.1.
- Added the Classifier field from PEP 301.
- The License and Platform fields should now only be used if the
  platform or license can't be handled by an appropriate Classifier
  value.
- Added fields: Download-URL, Requires, Provides, Obsoletes.

Open issues

None.

Acknowledgements

None.

References

[1] reStructuredText http://docutils.sourceforge.net/

Copyright

This document has been placed in the public domain.


PEP: 367
Title: New Super
Version: $Revision$
Last-Modified: $Date$
Author: Calvin Spealman, Tim Delaney
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Apr-2007
Python-Version: 2.6
Post-History: 28-Apr-2007, 29-Apr-2007, 29-Apr-2007, 14-May-2007

Numbering Note

This PEP has been renumbered to PEP 3135. The text below is the last
version submitted under the old number.

Abstract

This PEP proposes syntactic sugar for use of the super type to
automatically construct instances of the super type binding to the class
that a method was defined in, and the instance (or class object for
classmethods) that the method is currently acting upon.

The premise of the new super usage suggested is as follows:

    super.foo(1, 2)

to replace the old:

    super(Foo, self).foo(1, 2)

and the current __builtin__.super be aliased to __builtin__.__super__
(with __builtin__.super to be removed in Python 3.0).

It is further proposed that assignment to super become a SyntaxError,
similar to the behaviour of None.

Rationale

The current usage of super requires an explicit passing of both the
class and instance it must operate from, requiring a breaking of the DRY
(Don't Repeat Yourself) rule. This hinders any change in class name, and
is often considered a wart by many.

Specification

Within the specification section, some special terminology will be used
to distinguish similar and closely related concepts. "super type" will
refer to the actual builtin type named "super". A "super instance" is
simply an instance of the super type, which is associated with a class
and possibly with an instance of that class.

Because the new super semantics are not backwards compatible with Python
2.5, the new semantics will require a __future__ import:

    from __future__ import new_super

The current __builtin__.super will be aliased to __builtin__.__super__.
This will occur regardless of whether the new super semantics are
active. It is not possible to simply rename __builtin__.super, as that
would affect modules that do not use the new super semantics. In Python
3.0 it is proposed that the name __builtin__.super will be removed.
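To make the proposed sugar concrete, here is a before/after sketch. The
"before" form runs on any Python; the "after" form is proposed syntax
only and never shipped, so it is shown as a comment:

    class A(object):
        def save(self):
            return 'A.save'

    # Current (Python 2.5) spelling: the class is repeated by name.
    class B(A):
        def save(self):
            return 'B.save -> ' + super(B, self).save()

    # Proposed spelling under `from __future__ import new_super`
    # (illustrative only; this syntax was never released):
    #
    #     class B(A):
    #         def save(self):
    #             return 'B.save -> ' + super.save()

    assert B().save() == 'B.save -> A.save'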
Replacing the old usage of super, calls to the next class in the MRO
(method resolution order) can be made without explicitly creating a
super instance (although doing so will still be supported via
__super__). Every function will have an implicit local named super. This
name behaves identically to a normal local, including use by inner
functions via a cell, with the following exceptions:

1. Assigning to the name super will raise a SyntaxError at compile
   time;
2. Calling a static method or normal function that accesses the name
   super will raise a TypeError at runtime.

Every function that uses the name super, or has an inner function that
uses the name super, will include a preamble that performs the
equivalent of:

    super = __builtin__.__super__(<class>, <firstarg>)

where <class> is the class that the method was defined in, and
<firstarg> is the first parameter of the method (normally self for
instance methods, and cls for class methods). For static methods and
normal functions, <class> will be None, resulting in a TypeError being
raised during the preamble.

Note: The relationship between super and __super__ is similar to that
between import and __import__.

Much of this was discussed in the thread of the python-dev list, "Fixing
super anyone?"[1].

Open Issues

Determining the class object to use

The exact mechanism for associating the method with the defining class
is not specified in this PEP, and should be chosen for maximum
performance. For CPython, it is suggested that the class instance be
held in a C-level variable on the function object which is bound to one
of NULL (not part of a class), Py_None (static method) or a class object
(instance or class method).

Should super actually become a keyword?

With this proposal, super would become a keyword to the same extent that
None is a keyword. It is possible that further restricting the super
name may simplify implementation, however some are against the actual
keyword-ization of super. The simplest solution is often the correct
solution, and the simplest solution may well not be adding additional
keywords to the language when they are not needed. Still, it may solve
other open issues.

Closed Issues

super used with __call__ attributes

It was considered that instantiating super instances the classic way
might be a problem, because calling such an instance would look up the
__call__ attribute and thus try to perform an automatic super lookup to
the next class in the MRO. However, this was found to be false, because
calling an object only looks up the __call__ method directly on the
object's type. The following example shows this in action.

    class A(object):
        def __call__(self):
            return '__call__'
        def __getattribute__(self, attr):
            if attr == '__call__':
                return lambda: '__getattribute__'
    a = A()
    assert a() == '__call__'
    assert a.__call__() == '__getattribute__'

In any case, with the renaming of __builtin__.super to
__builtin__.__super__ this issue goes away entirely.

Reference Implementation

It is impossible to implement the above specification entirely in
Python. This reference implementation has the following differences to
the specification:

1. New super semantics are implemented using bytecode hacking.
2. Assignment to super is not a SyntaxError. Also see point #4.
3. Classes must either use the metaclass autosuper_meta or inherit from
   the base class autosuper to acquire the new super semantics.
4. super is not an implicit local variable. In particular, for inner
   functions to be able to use the super instance, there must be an
   assignment of the form super = super in the method.

The reference implementation assumes that it is being run on Python
2.5+.

    #!/usr/bin/env python
    #
    # autosuper.py

    from array import array
    import dis
    import new
    import types
    import __builtin__
    __builtin__.__super__ = __builtin__.super
    del __builtin__.super

    # We need these for modifying bytecode
    from opcode import opmap, HAVE_ARGUMENT, EXTENDED_ARG

    LOAD_GLOBAL = opmap['LOAD_GLOBAL']
    LOAD_NAME = opmap['LOAD_NAME']
    LOAD_CONST = opmap['LOAD_CONST']
    LOAD_FAST = opmap['LOAD_FAST']
    LOAD_ATTR = opmap['LOAD_ATTR']
    STORE_FAST = opmap['STORE_FAST']
    LOAD_DEREF = opmap['LOAD_DEREF']
    STORE_DEREF = opmap['STORE_DEREF']
    CALL_FUNCTION = opmap['CALL_FUNCTION']
    STORE_GLOBAL = opmap['STORE_GLOBAL']
    DUP_TOP = opmap['DUP_TOP']
    POP_TOP = opmap['POP_TOP']
    NOP = opmap['NOP']
    JUMP_FORWARD = opmap['JUMP_FORWARD']
    ABSOLUTE_TARGET = dis.hasjabs

    def _oparg(code, opcode_pos):
        return code[opcode_pos+1] + (code[opcode_pos+2] << 8)

    def _bind_autosuper(func, cls):
        co = func.func_code
        name = func.func_name
        newcode = array('B', co.co_code)
        codelen = len(newcode)
        newconsts = list(co.co_consts)
        newvarnames = list(co.co_varnames)

        # Check if the global 'super' keyword is already present
        try:
            sn_pos = list(co.co_names).index('super')
        except ValueError:
            sn_pos = None

        # Check if the varname 'super' keyword is already present
        try:
            sv_pos = newvarnames.index('super')
        except ValueError:
            sv_pos = None

        # Check if the cellvar 'super' keyword is already present
        try:
            sc_pos = list(co.co_cellvars).index('super')
        except ValueError:
            sc_pos = None

        # If 'super' isn't used anywhere in the function,
        # we don't have anything to do
        if sn_pos is None and sv_pos is None and sc_pos is None:
            return func

        c_pos = None
        s_pos = None
        n_pos = None

        # Check if the 'cls_name' and 'super' objects are already
        # in the constants
        for pos, o in enumerate(newconsts):
            if o is cls:
                c_pos = pos

            if o is __super__:
                s_pos = pos

            if o == name:
                n_pos = pos

        # Add in any missing objects to constants and varnames
        if c_pos is None:
            c_pos = len(newconsts)
            newconsts.append(cls)

        if n_pos is None:
            n_pos = len(newconsts)
            newconsts.append(name)

        if s_pos is None:
            s_pos = len(newconsts)
            newconsts.append(__super__)

        if sv_pos is None:
            sv_pos = len(newvarnames)
            newvarnames.append('super')

        # This goes at the start of the function. It is:
        #
        #   super = __super__(cls, self)
        #
        # If 'super' is a cell variable, we store to both the
        # local and cell variables (i.e. STORE_FAST and STORE_DEREF).
        #
        preamble = [
            LOAD_CONST, s_pos & 0xFF, s_pos >> 8,
            LOAD_CONST, c_pos & 0xFF, c_pos >> 8,
            LOAD_FAST, 0, 0,
            CALL_FUNCTION, 2, 0,
        ]

        if sc_pos is None:
            # 'super' is not a cell variable - we can just use
            # the local variable
            preamble += [
                STORE_FAST, sv_pos & 0xFF, sv_pos >> 8,
            ]
        else:
            # If 'super' is a cell variable, we need to handle LOAD_DEREF.
            preamble += [
                DUP_TOP,
                STORE_FAST, sv_pos & 0xFF, sv_pos >> 8,
                STORE_DEREF, sc_pos & 0xFF, sc_pos >> 8,
            ]

        preamble = array('B', preamble)

        # Bytecode for loading the local 'super' variable.
        load_super = array('B', [
            LOAD_FAST, sv_pos & 0xFF, sv_pos >> 8,
        ])

        preamble_len = len(preamble)
        need_preamble = False
        i = 0

        while i < codelen:
            opcode = newcode[i]
            need_load = False
            remove_store = False

            if opcode == EXTENDED_ARG:
                raise TypeError("Cannot use 'super' in function with "
                                "EXTENDED_ARG opcode")

            # If the opcode is an absolute target it needs to be adjusted
            # to take into account the preamble.
            elif opcode in ABSOLUTE_TARGET:
                oparg = _oparg(newcode, i) + preamble_len
                newcode[i+1] = oparg & 0xFF
                newcode[i+2] = oparg >> 8

            # If LOAD_GLOBAL(super) or LOAD_NAME(super) then we want to
            # change it into LOAD_FAST(super)
            elif ((opcode == LOAD_GLOBAL or opcode == LOAD_NAME)
                    and _oparg(newcode, i) == sn_pos):
                need_preamble = need_load = True

            # If LOAD_FAST(super) then we just need to add the preamble
            elif opcode == LOAD_FAST and _oparg(newcode, i) == sv_pos:
                need_preamble = need_load = True

            # If LOAD_DEREF(super) then we change it into LOAD_FAST(super)
            # because it's slightly faster.
            elif opcode == LOAD_DEREF and _oparg(newcode, i) == sc_pos:
                need_preamble = need_load = True

            if need_load:
                newcode[i:i+3] = load_super

            i += 1

            if opcode >= HAVE_ARGUMENT:
                i += 2

        # No changes needed - get out.
        if not need_preamble:
            return func

        # Our preamble will have 3 things on the stack
        co_stacksize = max(3, co.co_stacksize)

        # Conceptually, our preamble is on the `def` line.
        co_lnotab = array('B', co.co_lnotab)

        if co_lnotab:
            co_lnotab[0] += preamble_len

        co_lnotab = co_lnotab.tostring()

        # Our code consists of the preamble and the modified code.
        codestr = (preamble + newcode).tostring()

        codeobj = new.code(co.co_argcount, len(newvarnames), co_stacksize,
                           co.co_flags, codestr, tuple(newconsts), co.co_names,
                           tuple(newvarnames), co.co_filename, co.co_name,
                           co.co_firstlineno, co_lnotab, co.co_freevars,
                           co.co_cellvars)

        func.func_code = codeobj
        func.func_class = cls
        return func

    class autosuper_meta(type):
        def __init__(cls, name, bases, clsdict):
            UnboundMethodType = types.UnboundMethodType

            for v in vars(cls):
                o = getattr(cls, v)
                if isinstance(o, UnboundMethodType):
                    _bind_autosuper(o.im_func, cls)

    class autosuper(object):
        __metaclass__ = autosuper_meta

    if __name__ == '__main__':
        class A(autosuper):
            def f(self):
                return 'A'

        class B(A):
            def f(self):
                return 'B' + super.f()

        class C(A):
            def f(self):
                def inner():
                    return 'C' + super.f()

                # Needed to put 'super' into a cell
                super = super
                return inner()

        class D(B, C):
            def f(self, arg=None):
                var = None
                return 'D' + super.f()

        assert D().f() == 'DBCA'

Disassembly of B.f and C.f reveals the different preambles used when
super is simply a local variable compared to when it is used by an inner
function.

    >>> dis.dis(B.f)

    214           0 LOAD_CONST               4 (<type 'super'>)
                  3 LOAD_CONST               2 (<class '__main__.B'>)
                  6 LOAD_FAST                0 (self)
                  9 CALL_FUNCTION            2
                 12 STORE_FAST               1 (super)

    215          15 LOAD_CONST               1 ('B')
                 18 LOAD_FAST                1 (super)
                 21 LOAD_ATTR                1 (f)
                 24 CALL_FUNCTION            0
                 27 BINARY_ADD
                 28 RETURN_VALUE

    >>> dis.dis(C.f)

    218           0 LOAD_CONST               4 (<type 'super'>)
                  3 LOAD_CONST               2 (<class '__main__.C'>)
                  6 LOAD_FAST                0 (self)
                  9 CALL_FUNCTION            2
                 12 DUP_TOP
                 13 STORE_FAST               1 (super)
                 16 STORE_DEREF              0 (super)

    219          19 LOAD_CLOSURE             0 (super)
                 22 LOAD_CONST               1 (<code object inner>)
                 25 MAKE_CLOSURE             0
                 28 STORE_FAST               2 (inner)

    223          31 LOAD_FAST                1 (super)
                 34 STORE_DEREF              0 (super)

    224          37 LOAD_FAST                2 (inner)
                 40 CALL_FUNCTION            0
                 43 RETURN_VALUE

Note that in the final implementation, the preamble would not be part of
the bytecode of the method, but would occur immediately following
unpacking of parameters.

Alternative Proposals

No Changes

Although it's always attractive to just keep things how they are, people
have sought a change in the usage of super for some time, and for good
reason, as mentioned previously:

- Decoupling from the class name (which might not even be bound to the
  right class anymore!)
- Simpler looking, cleaner super calls would be better

Dynamic attribute on super type

The proposal adds a dynamic attribute lookup to the super type, which
will automatically determine the proper class and instance parameters.
Each super attribute lookup identifies these parameters and performs the
super lookup on the instance, as the current super implementation does
with the explicit invocation of a super instance upon a class and
instance.

This proposal relies on sys._getframe(), which is not appropriate for
anything except a prototype implementation.

super(__this_class__, self)

This is nearly an anti-proposal, as it basically relies on the
acceptance of the __this_class__ PEP, which proposes a special name that
would always be bound to the class within which it is used. If that is
accepted, __this_class__ could simply be used instead of the class' name
explicitly, solving the name binding issues[2].

self.__super__.foo(*args)

The __super__ attribute is mentioned in this PEP in several places, and
could be a candidate for the complete solution, actually using it
explicitly instead of any super usage directly. However,
double-underscore names are usually an internal detail, and attempted to
be kept out of everyday code.

super(self, *args) or __super__(self, *args)

This solution only solves the problem of the type indication, does not
handle differently named super methods, and is explicit about the name
of the instance. It is less flexible without being able to act on other
method names, in cases where that is needed. One use case this fails is
where a base-class has a factory classmethod and a subclass has two
factory classmethods, both of which need to properly make super calls to
the one in the base-class.

super.foo(self, *args)

This variation actually eliminates the problems with locating the proper
instance, and if any of the alternatives were pushed into the spotlight,
I would want it to be this one.

super or super()

This proposal leaves no room for different names, signatures, or
application to other classes, or instances.
A way to allow some similar use alongside the normal proposal would be
favorable, encouraging good design of multiple inheritance trees and
compatible methods.

super(*p, **kw)

There has been the proposal that directly calling super(*p, **kw) would
be equivalent to calling the method on the super object with the same
name as the method currently being executed, i.e. the following two
methods would be equivalent:

    def f(self, *p, **kw):
        super.f(*p, **kw)

    def f(self, *p, **kw):
        super(*p, **kw)

There is strong sentiment for and against this, but implementation and
style concerns are obvious. Guido has suggested that this should be
excluded from this PEP on the principle of KISS (Keep It Simple Stupid).

History

29-Apr-2007 - Changed title from "Super As A Keyword" to "New Super".
              Updated much of the language and added a terminology
              section for clarification in confusing places. Added
              reference implementation and history sections.

06-May-2007 - Updated by Tim Delaney to reflect discussions on the
              python-3000 and python-dev mailing lists.

References

[1] Fixing super anyone?
(https://mail.python.org/pipermail/python-3000/2007-April/006667.html)

[2] PEP 3130: Access to Module/Class/Function Currently Being Defined
(https://mail.python.org/pipermail/python-ideas/2007-April/000542.html)

Copyright

This document has been placed in the public domain.


PEP: 569
Title: Python 3.8 Release Schedule
Author: Łukasz Langa
Status: Final
Type: Informational
Topic: Release
Created: 27-Jan-2018
Python-Version: 3.8

Abstract

This document describes the development and release schedule for Python
3.8. The schedule primarily concerns itself with PEP-sized items.

Release Manager and Crew

- 3.8 Release Manager: Łukasz Langa
- Windows installers: Steve Dower
- Mac installers: Ned Deily
- Documentation: Julien Palard

3.8 Lifespan

3.8 will receive bugfix updates approximately every 2 months for
approximately 18 months. Some time after the release of 3.9.0 final, the
ninth and final 3.8 bugfix update was released. After that, security
updates (source only) were released for 5 years until the release of
Python 3.13.0 final.

As of 2024-10-07, 3.8 has reached the end-of-life phase of its release
cycle. 3.8.20 was the final security release.
The codebase for 3.8 is now frozen; no further updates will be provided,
nor will issues of any kind be accepted on the bug tracker.

Release Schedule

3.8.0 schedule

- 3.8 development begins: Monday, 2018-01-29
- 3.8.0 alpha 1: Sunday, 2019-02-03
- 3.8.0 alpha 2: Monday, 2019-02-25
- 3.8.0 alpha 3: Monday, 2019-03-25
- 3.8.0 alpha 4: Monday, 2019-05-06
- 3.8.0 beta 1: Tuesday, 2019-06-04 (No new features beyond this
  point.)
- 3.8.0 beta 2: Thursday, 2019-07-04
- 3.8.0 beta 3: Monday, 2019-07-29
- 3.8.0 beta 4: Friday, 2019-08-30
- 3.8.0 candidate 1: Tuesday, 2019-10-01
- 3.8.0 final: Monday, 2019-10-14

Bugfix releases

- 3.8.1rc1: Tuesday, 2019-12-10
- 3.8.1: Wednesday, 2019-12-18
- 3.8.2rc1: Monday, 2020-02-10
- 3.8.2rc2: Monday, 2020-02-17
- 3.8.2: Monday, 2020-02-24
- 3.8.3rc1: Wednesday, 2020-04-29
- 3.8.3: Wednesday, 2020-05-13
- 3.8.4rc1: Tuesday, 2020-06-30
- 3.8.4: Monday, 2020-07-13
- 3.8.5: Monday, 2020-07-20 (security hotfix)
- 3.8.6rc1: Tuesday, 2020-09-08
- 3.8.6: Thursday, 2020-09-24
- 3.8.7rc1: Monday, 2020-12-07
- 3.8.7: Monday, 2020-12-21
- 3.8.8rc1: Tuesday, 2021-02-16
- 3.8.8: Friday, 2021-02-19
- 3.8.9: Friday, 2021-04-02 (security hotfix)
- 3.8.10: Monday, 2021-05-03 (final regular bugfix release with binary
  installers)

Source-only security fix releases

Provided irregularly on an "as-needed" basis until October 7th 2024.

- 3.8.11: Monday, 2021-06-28
- 3.8.12: Monday, 2021-08-30
- 3.8.13: Wednesday, 2022-03-16
- 3.8.14: Tuesday, 2022-09-06
- 3.8.15: Tuesday, 2022-10-11
- 3.8.16: Tuesday, 2022-12-06
- 3.8.17: Tuesday, 2023-06-06
- 3.8.18: Thursday, 2023-08-24
- 3.8.19: Tuesday, 2024-03-19
- 3.8.20: Friday, 2024-09-06 (final security release)

Features for 3.8

Some of the notable features of Python 3.8 include:

- PEP 570, Positional-only arguments
- PEP 572, Assignment Expressions
- PEP 574, Pickle protocol 5 with out-of-band data
- PEP 578, Runtime audit hooks
- PEP 587, Python Initialization Configuration
- PEP 590, Vectorcall: a fast calling protocol for CPython
- Typing-related: PEP 591 (Final qualifier), PEP 586 (Literal types),
  and PEP 589 (TypedDict)
- Parallel filesystem cache for compiled bytecode
- Debug builds share ABI as release builds
- f-strings support a handy = specifier for debugging
- continue is now legal in finally: blocks
- on Windows, the default asyncio event loop is now ProactorEventLoop
- on macOS, the spawn start method is now used by default in
  multiprocessing
- multiprocessing can now use shared memory segments to avoid pickling
  costs between processes
- typed_ast is merged back to CPython
- LOAD_GLOBAL is now 40% faster
- pickle now uses Protocol 4 by default, improving performance

There are many other interesting changes; please consult the "What's
New" page in the documentation for a full list. A few of the language
features above are shown in the snippet that follows.
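A quick, runnable illustration (Python 3.8 or later; the clamp function
is a made-up example):

    def clamp(value, /, lo=0.0, hi=1.0):
        # PEP 570: parameters before "/" are positional-only
        return max(lo, min(hi, value))

    if (n := clamp(2.5)) >= 1:      # PEP 572: assignment expression
        print(f"{n=}")              # f-string "=" specifier prints: n=1.0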
Copyright

This document has been placed in the public domain.


PEP: 423
Title: Naming conventions and recipes related to packaging
Version: $Revision$
Last-Modified: $Date$
Author: Benoit Bryon <benoit@marmelune.net>
Discussions-To: distutils-sig@python.org
Status: Deferred
Type: Informational
Topic: Packaging
Content-Type: text/x-rst
Created: 24-May-2012
Post-History:

Abstract

This document deals with:

- names of Python projects,
- names of Python packages or modules being distributed,
- namespace packages.

It provides guidelines and recipes for distribution authors:

- new projects should follow the guidelines below.
- existing projects should be aware of these guidelines and can follow
  specific recipes for existing projects.

PEP Deferral

Further consideration of this PEP has been deferred at least until after
PEP 426 (package metadata 2.0) and related updates have been resolved.

Terminology

The reference is the packaging terminology in the Python documentation.

Relationship with other PEPs

- PEP 8 deals with the code style guide, including names of Python
  packages and modules. It covers the syntax of package/module names.
- PEP 345 deals with packaging metadata, and defines the name argument
  of the packaging.core.setup() function.
- PEP 420 deals with namespace packages. It brings support for
  namespace packages to Python core. Before, namespace packages were
  implemented by external libraries.
- PEP 3108 deals with the transition between Python 2.x and Python 3.x
  applied to the standard library: some modules to be deleted, some to
  be renamed. It points out that naming conventions matter and is an
  example of a transition plan.

Overview

Here is a summarized list of guidelines you should follow to choose
names:

- understand and respect namespace ownership.
- if your project is related to another project or community:
  - search for conventions in the main project's documentation,
    because projects should organize community contributions.
  - follow specific project or related community conventions, if
    any.
  - if there is no convention, follow a standard naming pattern.
- make sure your project name is unique, i.e. avoid duplicates:
  - use a top-level namespace for ownership,
  - check for name availability,
  - register names with PyPI.
- make sure distributed package and module names are unique, unless
  you explicitly want to distribute alternatives to existing packages
  or modules. Using the same value for package/module name and project
  name is the recommended way to achieve this.
- distribute only one package or module at a time, unless you know
  what you are doing. It makes it possible to apply the "use a single
  name" rule, and thus makes names consistent.
- make it easy to discover and remember your project:
  - use names as memorable as possible,
  - use names as meaningful as possible,
  - use other packaging metadata.
- avoid deep nesting.
  Flat things are easier to use and remember than nested ones:

  - one or two namespace levels are recommended, because they are
    almost always enough.
  - even if not recommended, three levels are, de facto, a common
    case.
  - in most cases, you should not need more than three levels.

- follow PEP 8 for the syntax of package and module names.
- if you followed specific conventions, or if your project is intended
  to receive contributions from the community, organize community
  contributions.
- if still in doubt, ask.

If in doubt, ask

If you feel unsure after reading this document, ask the Python community
on IRC or on a mailing list.

Top-level namespace relates to code ownership

This helps avoid clashes between project names.

Ownership could be:

- an individual. Example: gp.fileupload is owned and maintained by
  Gael Pasgrimaud.
- an organization. Examples:
  - zest.releaser is owned and maintained by Zest Software.
  - Django is owned and maintained by the Django Software
    Foundation.
- a group or community. Example: sphinx is maintained by developers of
  the Sphinx project, not only by its author, Georg Brandl.
- a group or community related to another package. Example:
  collective.recaptcha is owned by its author: David Glick,
  Groundwire. But the "collective" namespace is owned by the Plone
  community.

Respect ownership

Understand the purpose of a namespace before you use it.

Don't plug into a namespace you don't own, unless explicitly authorized.

If in doubt, ask.

As an example, don't plug into the "django.contrib" namespace, because
it is managed by Django's core contributors.

Exceptions can be defined by project authors. See Organize community
contributions below.

Also, this rule applies to non-Python projects.

As an example, don't use "apache" as a top-level namespace: "Apache" is
the name of an existing project (in the case of "Apache", it is also a
trademark).

Private (including closed-source) projects use a namespace

... because private projects are owned by somebody. So apply the
ownership rule.

For internal/customer projects, use your company name as the namespace.

This rule applies to closed-source projects.

As an example, if you are creating a "climbing" project for the "Python
Sport" company: use the "pythonsport.climbing" name, even if it is
closed source.

Individual projects use a namespace

... because they are owned by individuals. So apply the ownership rule.

There is no shame in releasing a project as open source even if it has
an "internal" or "individual" name.

If the project comes to a point where the author wants to change
ownership (i.e. the project no longer belongs to an individual), keep in
mind it is easy to rename the project.

Community-owned projects can avoid namespace packages

If your project is generic enough (i.e. it is not a contrib to another
product or framework), you can avoid namespace packages.
The base condition is generally that your project is owned by a group
(i.e. the development team) which is dedicated to this project.

Only use a "shared" namespace if you really intend the code to be
community owned.

As an example, the sphinx project belongs to the Sphinx development
team. There is no need to have some "sphinx" namespace package with only
one "sphinx.sphinx" project inside.

In doubt, use an individual/organization namespace

If your project is really experimental, the best choice is to use an
individual or organization namespace:

- it allows projects to be released early.
- it won't block a name if the project is abandoned.
- it doesn't block future changes. When a project becomes mature and
  there is no reason to keep individual ownership, it remains possible
  to rename the project.

Use a single name

Distribute only one package (or only one module) per project, and use
the package (or module) name as the project name.

- It avoids possible confusion between the project name and the
  distributed package or module name.

- It makes the name consistent.

- It is explicit: when one sees the project name, one can guess the
  package/module name, and vice versa.

- It also limits implicit clashes between package/module names. By
  using a single name, when you register a project name with PyPI, you
  also perform a basic package/module name availability verification.

  As an example, pipeline, python-pipeline and django-pipeline all
  distribute a package or module called "pipeline". So installing two
  of them leads to errors. This issue wouldn't have occurred if these
  distributions used a single name.

Yes:

- Package name: "kheops.pyramid", i.e. import kheops.pyramid
- Project name: "kheops.pyramid", i.e. pip install kheops.pyramid

No:

- Package name: "kheops"
- Project name: "KheopsPyramid"

Note

For historical reasons, PyPI contains many distributions where project
and distributed package/module names differ.
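As an illustration of the rule, a minimal setup script for the "Yes"
example above might look like this (a sketch only, assuming a
setuptools-era layout with pre-PEP 420 namespace packages):

    from setuptools import setup

    setup(
        name='kheops.pyramid',          # project name, as registered on PyPI
        version='1.0',
        # distributed package: same dotted name as the project
        packages=['kheops', 'kheops.pyramid'],
        # "kheops" is the ownership namespace (pre-PEP 420 declaration)
        namespace_packages=['kheops'],
    )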
Multiple packages/modules should be rare

Technically, Python distributions can provide multiple packages and/or
modules. See the setup script reference for details.

Some distributions actually do. As an example, setuptools and distribute
both declare "pkg_resources", "easy_install" and "site" modules in
addition to their respective "setuptools" and "distribute" packages.

Consider this use case as exceptional. In most cases, you don't need
this feature. So a distribution should provide only one package or
module at a time.

Distinct names should be rare

A notable exception to the Use a single name rule is when you explicitly
need distinct names.

As an example, the Pillow project provides an alternative to the
original PIL distribution. Both projects distribute a "PIL" package.

Consider this use case as exceptional. In most cases, you don't need
this feature. So a distributed package name should be equal to the
project name.

Follow PEP 8 for syntax of package and module names

PEP 8 applies to names of Python packages and modules.

If you Use a single name, PEP 8 also applies to project names. The
exceptions are namespace packages, where dots are required in the
project name.

Pick memorable names

One important thing about a project name is that it be memorable.

As an example, celery is not a meaningful name. At first, it is not
obvious that it deals with message queuing. But it is memorable, partly
because it can be used to feed a RabbitMQ server.

Pick meaningful names

Ask yourself "how would I describe in one sentence what this name is
for?", and then "could anyone have guessed that by looking at the
name?".

As an example, DateUtils is a meaningful name. It is obvious that it
deals with utilities for dates.

When you are using namespaces, try to make each part meaningful.

Use packaging metadata

Consider project names as unique identifiers on PyPI:

- it is important that these identifiers remain human-readable.
- it is even better when these identifiers are meaningful.
- but the primary purpose of identifiers is not to classify or
  describe projects.

Classifiers and keywords metadata are made for categorization of
distributions. Summary and description metadata are meant to describe
the project.

As an example, there is a "Framework :: Twisted" classifier. Even if
names are quite heterogeneous (they don't follow a particular pattern),
we get the list.

In order to Organize community contributions, conventions about names
and namespaces matter, but conventions about metadata should be even
more important.

As an example, we can find Plone portlets in many places:

- plone.portlet.*
- collective.portlet.*
- collective.portlets.*
- collective.*.portlets
- some vendor-related projects such as "quintagroup.portlet.cumulus"
- and even projects where the "portlet" pattern doesn't appear in the
  name.

Even if the Plone community has conventions, using the name to
categorize distributions is inappropriate. It's impossible to get the
full list of distributions that provide portlets for Plone by filtering
on names. But it would be possible if all these distributions used the
"Framework :: Plone" classifier and the "portlet" keyword.
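For instance, a portlet distribution could carry its categorization in
metadata rather than in its name. A hypothetical sketch (name and values
are illustrative):

    from setuptools import setup

    setup(
        name='quintagroup.portlet.cumulus',
        classifiers=['Framework :: Plone'],   # categorization lives here...
        keywords='portlet tag cloud',         # ...and here, not in the name
    )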
Avoid deep nesting

The Zen of Python (PEP 20) says "Flat is better than nested".

Two levels is almost always enough

Don't define everything in deeply nested hierarchies: you will end up
with projects and packages like "pythonsport.common.maps.forest". This
type of name is both verbose and cumbersome (e.g. if you have many
imports from the package).

Furthermore, big hierarchies tend to break down over time as the
boundaries between different packages blur.

The consensus is that two levels of nesting are preferred.

For example, we have plone.principalsource instead of
plone.source.principal or something like that. The name is shorter, the
package structure is simpler, and there would be very little to gain
from having three levels of nesting here. It would be impractical to try
to put all "core Plone" sources (a source is a kind of vocabulary) into
the plone.source.* namespace, in part because some sources are part of
other packages, and in part because sources already exist in other
places. Had we made a new namespace, it would be inconsistently used
from the start.

Yes: "pyranha"

Yes: "pythonsport.climbing"

Yes: "pythonsport.forestmap"

No: "pythonsport.maps.forest"

Use only one level for ownership

Don't use 3 levels to set individual/organization ownership in a
community namespace.

As an example, let's consider:

- you are plugging into a community namespace, such as "collective".
- and you want to add a more restrictive "ownership" level, to avoid
  clashes inside the community.

In such a case, you'd better use the most restrictive ownership level as
the first level.

As an example, where "collective" is a major community namespace that
"gergovie" belongs to, and "vercingetorix" is the name of "gergovie"'s
author:

No: "collective.vercingetorix.gergovie"

Yes: "vercingetorix.gergovie"

Don't use namespace levels for categorization

Use packaging metadata instead.

Don't use more than 3 levels

Technically, you can create deeply nested hierarchies. However, in most
cases, you shouldn't need it.

Note

Even communities where namespaces are standard don't use more than 3
levels.

Conventions for communities or related projects

Follow community or related project conventions, if any

Projects or related communities can have specific conventions, which may
differ from those explained in this document.

In such a case, they should declare specific conventions in
documentation.

So, if your project belongs to another project or to a community, first
look for specific conventions in the main project's documentation.

If there are no specific conventions, follow the ones declared in this
document.

As an example, the Plone community releases community contributions in
the "collective" namespace package. It differs from the standard
namespace for contributions proposed here. But since it is documented,
there is no ambiguity and you should follow this specific convention.

Use standard pattern for community contributions

When no specific rule is defined, use the
${MAINPROJECT}contrib.${PROJECT} pattern to store community
contributions for any product or framework, where:

- ${MAINPROJECT} is the name of the related project. "pyranha" in the
  example below.
- ${PROJECT} is the name of your project. "giantteeth" in the example
  below.

As an example:

- you are the author of the "pyranha" project. You own the "pyranha"
  namespace.
- you didn't define specific naming conventions for community
  contributions.
- a third-party developer wants to publish a "giantteeth" project
  related to your "pyranha" project in a community namespace. So he
  should publish it as "pyranhacontrib.giantteeth".

It is the simplest way to Organize community contributions.

Note

Why the ${MAINPROJECT}contrib.* pattern?

- ${MAINPROJECT}c.* is not explicit enough. As examples, "zc" belongs
  to "Zope Corporation" whereas "z3c" belongs to "Zope 3 community".
- ${MAINPROJECT}community is too long.
- ${MAINPROJECT}community conflicts with existing namespaces such as
  "iccommunity" or "PyCommunity".
- ${MAINPROJECT}.contrib.* is inside the ${MAINPROJECT} namespace, i.e.
  it is owned by ${MAINPROJECT} authors. It breaks the Top-level
  namespace relates to code ownership rule.
- ${MAINPROJECT}.contrib.* breaks the Avoid deep nesting rule.
- names where ${MAINPROJECT} doesn't appear are not explicit enough,
  i.e. nobody can guess they are related to ${MAINPROJECT}.
  As an example, it is not obvious that "collective.*" belongs to the
  Plone community.
- ${DIST}contrib.* looks like the existing sphinxcontrib-* packages.
  But sphinxcontrib-* is actually about Sphinx contrib, so this is not
  a real conflict... In fact, the "contrib" suffix was inspired by
  "sphinxcontrib".

Organize community contributions

This is the counterpart of the follow community conventions and standard
pattern for contributions rules.

Actions:

- Choose a naming convention for community contributions.
- If it is not the default, then document it.
  - if you use the default convention, then this document should be
    enough. Don't repeat it. You may reference it.
  - else, tell users about custom conventions in the project's
    "contribute" or "create modules" documentation.
- Also recommend the use of additional metadata, such as classifiers
  and keywords.

About convention choices:

- New projects should choose the default contrib pattern.

- Existing projects with community contributions should start with
  custom conventions. Then they can Promote migrations.

  It means that existing community conventions don't have to be
  changed. But, at least, they should be explicitly documented.

Example: "pyranha" is your project name and package name. Tell
contributors that:

- pyranha-related distributions should use the "pyranha" keyword
- pyranha-related distributions providing templates should also use
  the "templates" keyword.
- community contributions should be released under the "pyranhacontrib"
  namespace (i.e. use the "pyranhacontrib.*" pattern).

Register names with PyPI

PyPI is the central place for distributions in the Python community. So,
it is also the place where to register project and package names.

See Registering with the Package Index for details.

Recipes

The following recipes will help you follow the guidelines and
conventions above.

How to check for name availability?

Before you choose a project name, make sure it hasn't already been
registered in the following locations:

- PyPI
- that's all. PyPI is the only official place.

As an example, you could also check in various locations such as popular
code hosting services, but keep in mind that PyPI is the only place you
can register for names in the Python community.

That's why it is important you register names with PyPI.

Also make sure the names of distributed packages or modules haven't
already been registered:

- in the Python Standard Library.
- inside projects at PyPI. There is currently no helper for that.
  Notice that the more projects follow the use a single name rule, the
  easier the verification becomes.
- you may ask the community.

The use a single name rule also helps you avoid clashes with package
names: if a project name is available, then the package name has good
chances to be available too.

How to rename a project?

Renaming a project is possible, but keep in mind that it will cause some
confusion. So, pay particular attention to README and documentation, so
that users understand what happened.

1. First of all, do not remove legacy distributions from PyPI, because
   some users may be using them.
2. Copy the legacy project, then change names (project and
   package/module). Pay attention to, at least:
   - packaging files,
   - folder name that contains source files,
   - documentation, including README,
   - import statements in code.
3. Assign Obsoletes-Dist metadata to the new distribution in the
   setup.cfg file. See PEP 345 about Obsoletes-Dist and the setup.cfg
   specification.
4. Release a new version of the renamed project, then publish it.
5. Edit the legacy project:
   - add a dependency on the new project,
   - drop everything except packaging stuff,
   - add the Development Status :: 7 - Inactive classifier in the
     setup script,
   - publish a new release.
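The PEP targets the distutils2-era setup.cfg format, which never
stabilized. As a rough sketch of steps 3 and 5, here is the closest
shipped equivalent, using the metadata arguments introduced for PEP 314
(project names reuse the examples above; the obsoletes argument emits
the older Obsoletes field rather than Obsoletes-Dist):

    from setuptools import setup

    # New project's setup script (step 3): declare what it replaces.
    setup(
        name='vercingetorix.gergovie',
        version='2.0',
        obsoletes=['collective.vercingetorix.gergovie'],
    )

    # Legacy project's setup script (step 5): an empty shell that pulls
    # in the renamed project and flags itself inactive.
    setup(
        name='collective.vercingetorix.gergovie',
        version='1.1',
        install_requires=['vercingetorix.gergovie'],
        classifiers=['Development Status :: 7 - Inactive'],
    )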
So, users of the legacy package:

- can continue using the legacy distributions at a deprecated version,
- can upgrade to the latest version of the legacy distribution, which
  is empty...
- ... and automatically download the new distribution as a dependency
  of the legacy one.

Users who discover the legacy project see it is inactive.

Improved handling of renamed projects on PyPI

If many projects follow the Renaming howto recipe, then many legacy
distributions will have the following characteristics:

- the Development Status :: 7 - Inactive classifier.
- the latest version is empty, except packaging stuff.
- the latest version "redirects" to another distribution, e.g. it has a
  single dependency on the renamed project.
- referenced as Obsoletes-Dist in a newer distribution.

So it will be possible to detect renamed projects and improve
readability on PyPI, so that users can focus on active distributions.
But this feature is not required now. There is no urgency. It won't be
covered in this document.

How to apply naming guidelines on existing projects?

There is no obligation for existing projects to be renamed. The choice
is left to project authors and maintainers for obvious reasons.

However, project authors are invited to:

- at least, state about current naming.
- then plan and promote migration.
- optionally actually rename the existing project or distributed
  packages/modules.

State about current naming

The important thing, at first, is that you state about current choices:

- Ask yourself "why did I choose the current name?", then document it.
- If there are differences with the guidelines provided in this
  document, you should tell your users.
- If possible, create issues in the project's bugtracker, at least for
  the record. Then you are free to resolve them later, or maybe mark
  them as "wontfix".

Projects that are meant to receive contributions from the community
should also organize community contributions.

Promote migrations

Every Python developer should migrate whenever possible, or promote the
migrations in their respective communities.

Apply these guidelines on your projects, then the community will see it
is safe.

In particular, "leaders" such as authors of popular projects are
influential; they have power and, thus, responsibility over communities.

Apply these guidelines on popular projects, then communities will adopt
the conventions too.

Projects should promote migrations when they release a new (major)
version, particularly if this version introduces support for Python 3.x,
the new standard library's packaging or namespace packages.

Opportunity

As of Python 3.3 being developed:

- many projects are not Python 3.x compatible. It includes "big"
  products or frameworks. It means that many projects will have to do
  a migration to support Python 3.x.
- packaging (aka distutils2) is on the starting blocks.
  When it is released, projects will be invited to migrate and use the
  new packaging.
- PEP 420 brings official support for namespace packages to Python.

It means that most active projects should be about to migrate in the
next year(s) to support Python 3.x, new packaging, or new namespace
packages.

Such an opportunity is unique and won't come again soon! So let's
introduce and promote naming conventions as soon as possible (i.e. now).

References

Additional background:

- Martin Aspeli's article about names. Some parts of this document are
  quotes from this article.
- the in-development official packaging documentation.
- The Hitchhiker's Guide to Packaging, which has an empty placeholder
  for a "naming specification".

Copyright

This document has been placed in the public domain.


PEP: 203
Title: Augmented Assignments
Author: Thomas Wouters <thomas@python.org>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 13-Jul-2000
Python-Version: 2.0
Post-History: 14-Aug-2000

Introduction

This PEP describes the augmented assignment proposal for Python 2.0.
This PEP tracks the status and ownership of this feature, slated for
introduction in Python 2.0. It contains a description of the feature and
outlines changes necessary to support the feature. This PEP summarizes
discussions held in mailing list forums[1], and provides URLs for
further information where appropriate. The CVS revision history of this
file contains the definitive historical record.

Proposed Semantics

The proposed patch that adds augmented assignment to Python introduces
the following new operators:

    += -= *= /= %= **= <<= >>= &= ^= |=

They implement the same operator as their normal binary form, except
that the operation is done in-place when the left-hand side object
supports it, and that the left-hand side is only evaluated once.

They truly behave as augmented assignment, in that they perform all of
the normal load and store operations, in addition to the binary
operation they are intended to do. So, given the expression:

    x += y

The object x is loaded, then y is added to it, and the resulting object
is stored back in the original place. The precise action performed on
the two arguments depends on the type of x, and possibly of y.

The idea behind augmented assignment in Python is that it isn't just an
easier way to write the common practice of storing the result of a
binary operation in its left-hand operand, but also a way for the
left-hand operand in question to know that it should operate on itself,
rather than creating a modified copy of itself.

To make this possible, a number of new hooks are added to Python classes
and C extension types, which are called when the object in question is
used as the left-hand side of an augmented assignment operation.
If the class or type does not implement the in-place hooks, the normal
hooks for the particular binary operation are used.

So, given an instance object x, the expression:

    x += y

tries to call x.__iadd__(y), which is the in-place variant of __add__.
If __iadd__ is not present, x.__add__(y) is attempted, and finally
y.__radd__(x) if __add__ is missing too. There is no right-hand-side
variant of __iadd__, because that would require y to know how to
in-place modify x, which is unsafe to say the least. The __iadd__ hook
should behave similarly to __add__, returning the result of the
operation (which could be self), which is to be assigned to the
variable x.
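Since this part of the proposal shipped unchanged, the hook can be
demonstrated on any modern Python (the Accumulator class is a made-up
example):

    class Accumulator:
        def __init__(self):
            self.total = 0

        def __iadd__(self, other):
            self.total += other   # modify self in-place...
            return self           # ...and return the object that will be
                                  # re-bound to the assignment target

    acc = Accumulator()
    acc += 5
    acc += 7
    assert acc.total == 12        # both += calls updated the same object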
For C extension types, the hooks are members of the PyNumberMethods and
PySequenceMethods structures. Some special semantics apply to make the
use of these methods, and the mixing of Python instance objects and C
types, as unsurprising as possible.

In the generic case of x <operator>= y (or a similar case using the
PyNumber_InPlace API functions) the principal object being operated on
is x. This differs from normal binary operations, where x and y could be
considered co-operating, because unlike in binary operations, the
operands in an in-place operation cannot be swapped. However, in-place
operations do fall back to normal binary operations when in-place
modification is not supported, resulting in the following rules:

- If the left-hand object (x) is an instance object, and it has a
  __coerce__ method, call that function with y as the argument. If
  coercion succeeds, and the resulting left-hand object is a different
  object than x, stop processing it as in-place and call the
  appropriate function for the normal binary operation, with the
  coerced x and y as arguments. The result of the operation is
  whatever that function returns.

  If coercion does not yield a different object for x, or x does not
  define a __coerce__ method, and x has the appropriate __ihook__ for
  this operation, call that method with y as the argument, and the
  result of the operation is whatever that method returns.

- Otherwise, if the left-hand object is not an instance object, but
  its type does define the in-place function for this operation, call
  that function with x and y as the arguments, and the result of the
  operation is whatever that function returns.

  Note that no coercion on either x or y is done in this case, and
  it's perfectly valid for a C type to receive an instance object as
  the second argument; that is something that cannot happen with
  normal binary operations.

- Otherwise, process it exactly as a normal binary operation (not
  in-place), including argument coercion. In short, if either argument
  is an instance object, resolve the operation through __coerce__,
  __hook__ and __rhook__. Otherwise, both objects are C types, and
  they are coerced and passed to the appropriate function.

- If no way to process the operation can be found, raise a TypeError
  with an error message specific to the operation.

- Some special casing exists to account for the case of + and *, which
  have a special meaning for sequences: for +, sequence concatenation,
  no coercion whatsoever is done if a C type defines sq_concat or
  sq_inplace_concat. For *, sequence repeating, y is converted to a C
  integer before calling either sq_inplace_repeat or sq_repeat. This
  is done even if y is an instance, though not if x is an instance.

The in-place function should always return a new reference, either to
the old x object if the operation was indeed performed in-place, or to a
new object.

Rationale

There are two main reasons for adding this feature to Python: simplicity
of expression, and support for in-place operations. The end result is a
tradeoff between simplicity of syntax and simplicity of expression; like
most new features, augmented assignment doesn't add anything that was
previously impossible. It merely makes these things easier to do.

Adding augmented assignment will make Python's syntax more complex.
Instead of a single assignment operation, there are now twelve
assignment operations, eleven of which also perform a binary operation.
However, these eleven new forms of assignment are easy to understand as
the coupling between assignment and the binary operation, and they
require no large conceptual leap to understand. Furthermore, languages
that do have augmented assignment have shown that they are a popular,
much used feature. Expressions of the form:

    <x> = <x> <operator> <expression>

are common enough in those languages to make the extra syntax
worthwhile, and Python does not have significantly fewer of those
expressions. Quite the opposite, in fact, since in Python you can also
concatenate lists with a binary operator, something that is done quite
frequently. Writing the above expression as:

    <x> <operator>= <expression>

is both more readable and less error prone, because it is instantly
obvious to the reader that it is <x> that is being changed, and not <x>
that is being replaced by something almost, but not quite, entirely
unlike <x>.

The new in-place operations are especially useful to matrix calculation
and other applications that require large objects. In order to
efficiently deal with the available program memory, such packages cannot
blindly use the current binary operations. Because these operations
always create a new object, adding a single item to an existing (large)
object would result in copying the entire object (which may cause the
application to run out of memory), adding the single item, and then
possibly deleting the original object, depending on reference count.

To work around this problem, the packages currently have to use methods
or functions to modify an object in-place, which is definitely less
readable than an augmented assignment expression. Augmented assignment
won't solve all the problems for these packages, since some operations
cannot be expressed in the limited set of binary operators to start
with, but it is a start. PEP 211 is looking at adding new operators.
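The in-place behaviour is easy to observe with lists, where += extends
the existing object while + builds a new one:

    a = [1, 2]
    b = a            # second reference to the same list
    a += [3]         # in-place: list.__iadd__ extends the object itself
    assert b == [1, 2, 3]

    a = a + [4]      # binary: creates a brand-new list and rebinds a
    assert b == [1, 2, 3]   # b still refers to the old object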
New methods

The proposed implementation adds the following 11 possible hooks which
Python classes can implement to overload the augmented assignment
operations:

    __iadd__
    __isub__
    __imul__
    __idiv__
    __imod__
    __ipow__
    __ilshift__
    __irshift__
    __iand__
    __ixor__
    __ior__

The i in __iadd__ stands for in-place.

For C extension types, the following struct members are added.

To PyNumberMethods:

    binaryfunc nb_inplace_add;
    binaryfunc nb_inplace_subtract;
    binaryfunc nb_inplace_multiply;
    binaryfunc nb_inplace_divide;
    binaryfunc nb_inplace_remainder;
    binaryfunc nb_inplace_power;
    binaryfunc nb_inplace_lshift;
    binaryfunc nb_inplace_rshift;
    binaryfunc nb_inplace_and;
    binaryfunc nb_inplace_xor;
    binaryfunc nb_inplace_or;

To PySequenceMethods:

    binaryfunc sq_inplace_concat;
    intargfunc sq_inplace_repeat;

In order to keep binary compatibility, the tp_flags TypeObject member is
used to determine whether the TypeObject in question has allocated room
for these slots. Until a clean break in binary compatibility is made
(which may or may not happen before 2.0) code that wants to use one of
the new struct members must first check that they are available with the
PyType_HasFeature() macro:

    if (PyType_HasFeature(x->ob_type, Py_TPFLAGS_HAVE_INPLACE_OPS) &&
        x->ob_type->tp_as_number && x->ob_type->tp_as_number->nb_inplace_add) {
        /* ... */

This check must be made even before testing the method slots for NULL
values! The macro only tests whether the slots are available, not
whether they are filled with methods or not.

Implementation

The current implementation of augmented assignment [2] adds, in addition
to the methods and slots already covered, 13 new bytecodes and 13 new
API functions.

The API functions are simply in-place versions of the current
binary-operation API functions:

    PyNumber_InPlaceAdd(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceSubtract(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceMultiply(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceDivide(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceRemainder(PyObject *o1, PyObject *o2);
    PyNumber_InPlacePower(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceLshift(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceRshift(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceAnd(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceXor(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceOr(PyObject *o1, PyObject *o2);
    PySequence_InPlaceConcat(PyObject *o1, PyObject *o2);
    PySequence_InPlaceRepeat(PyObject *o, int count);

They call either the Python class hooks (if either of the objects is a
Python class instance) or the C type's number or sequence methods.

The new bytecodes are:

    INPLACE_ADD
    INPLACE_SUBTRACT
    INPLACE_MULTIPLY
    INPLACE_DIVIDE
    INPLACE_REMAINDER
    INPLACE_POWER
    INPLACE_LEFTSHIFT
    INPLACE_RIGHTSHIFT
    INPLACE_AND
    INPLACE_XOR
    INPLACE_OR
    ROT_FOUR
    DUP_TOPX

The INPLACE_* bytecodes mirror the BINARY_* bytecodes, except that they
are implemented as calls to the InPlace API functions. The other two
bytecodes are utility bytecodes: ROT_FOUR behaves like ROT_THREE except
that the four topmost stack items are rotated.

DUP_TOPX is a bytecode that takes a single argument, which should be an
integer between 1 and 5 (inclusive), giving the number of items to
duplicate in one block. Given a stack like this (where the right side of
the list is the top of the stack):

    [1, 2, 3, 4, 5]

DUP_TOPX 3 would duplicate the top 3 items, resulting in this stack:

    [1, 2, 3, 4, 5, 3, 4, 5]

DUP_TOPX with an argument of 1 is the same as DUP_TOP. The limit of 5 is
purely an implementation limit. The implementation of augmented
assignment requires only DUP_TOPX with arguments of 2 and 3, and could
do without this new opcode at the cost of a fair number of DUP_TOP and
ROT_*.
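The stack effect of the two utility bytecodes is easy to model with a
plain Python list standing in for the value stack; this is a toy sketch,
not the interpreter's actual implementation:

    def dup_topx(stack, n):
        # Duplicate the top n items as one block (models DUP_TOPX n).
        stack.extend(stack[-n:])

    def rot_four(stack):
        # Move the top item down to fourth place (models ROT_FOUR).
        top = stack.pop()
        stack.insert(-3, top)

    s = [1, 2, 3, 4, 5]
    dup_topx(s, 3)
    assert s == [1, 2, 3, 4, 5, 3, 4, 5]   # matches the example above

    s = [1, 2, 3, 4]
    rot_four(s)
    assert s == [4, 1, 2, 3]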
Open Issues

The PyNumber_InPlace API is only a subset of the normal PyNumber API:
only those functions that are required to support the augmented
assignment syntax are included. If other in-place API functions are
needed, they can be added later.

The DUP_TOPX bytecode is a convenience bytecode, and is not actually
necessary. It should be considered whether this bytecode is worth
having. There seems to be no other possible use for this bytecode at
this time.

Copyright

This document has been placed in the public domain.

References

[1] http://www.python.org/pipermail/python-list/2000-June/059556.html

[2] http://sourceforge.net/patch?func=detailpatch&patch_id=100699&group_id=5470


PEP: 603
Title: Adding a frozenmap type to collections
Version: $Revision$
Last-Modified: $Date$
Author: Yury Selivanov
Discussions-To: https://discuss.python.org/t/pep-603-adding-a-frozenmap-type-to-collections/2318/
Status: Draft
Type: Standards Track
Content-Type: text/x-rst
Created: 12-Sep-2019
Post-History: 12-Sep-2019

Abstract

A persistent data structure is defined as a data structure that
preserves the previous version of the data when the data is modified.
Such data structures are effectively immutable, as operations on them do
not update the structure in-place, but instead always yield a new
updated structure (see [1] for more details).

This PEP proposes to add a new fully persistent and immutable mapping
type called frozenmap to the collections module.

The bulk of frozenmap's reference implementation is already used in
CPython to implement the contextvars module.

Rationale

Python has two immutable collection types: tuple and frozenset. These
types can be used to represent immutable lists and sets. However, a way
to represent immutable mappings does not yet exist, and this PEP
proposes a frozenmap to implement an immutable mapping.
The proposed frozenmap type:

- implements the collections.abc.Mapping protocol,
- supports pickling, and
- provides an API for efficient creation of "modified" versions.

The following use cases illustrate why an immutable mapping is
desirable:

- Immutable mappings are hashable, which allows their use as dictionary
  keys or set elements.

  This hashable property permits functions decorated with
  @functools.lru_cache() to accept immutable mappings as arguments.
  Unlike an immutable mapping, passing a plain dict to such a function
  results in an error.

- Immutable mappings can hold complex state. Since immutable mappings
  can be copied by reference, transactional mutation of state can be
  efficiently implemented.

- Immutable mappings can be used to safely share dictionaries across
  thread and asynchronous task boundaries. The immutability makes it
  easier to reason about threads and asynchronous tasks.

Lastly, CPython [2] already contains the main portion of the C code
required for the frozenmap implementation. The C code already exists to
implement the contextvars module (see PEP 567 for more details).
Exposing this C code via a public collection type drastically increases
the number of users of the code. This leads to increased code quality by
discovering bugs and improving performance, which without a frozenmap
collection would be very challenging, because most programs use the
contextvars module indirectly.

Specification

A new public immutable type frozenmap is added to the collections
module.

Construction

frozenmap implements a dict-like construction API:

- frozenmap() creates a new empty immutable mapping;
- frozenmap(**kwargs) creates a mapping from **kwargs, e.g.
  frozenmap(x=10, y=0, z=-1)
- frozenmap(collection) creates a mapping from the passed collection
  object. The passed collection object can be:
  - a dict,
  - another frozenmap,
  - an object with an items() method that is expected to return a
    series of key/value tuples, or
  - an iterable of key/value tuples.

Data Access

frozenmap implements the collections.abc.Mapping protocol. Therefore,
getters, membership checks, and iteration work the same way that they
would for a dict:

    m = frozenmap(foo='bar')

    assert m['foo'] == 'bar'
    assert m.get('foo') == 'bar'
    assert 'foo' in m

    assert 'baz' not in m
    assert m.get('baz', 'missing') == 'missing'

    assert m == m
    assert m != frozenmap()   # m is not equal to an empty frozenmap

    assert len(m) == 1

    # etc.
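frozenmap never landed in the standard library (the PEP is a draft),
but the reference implementation cited below ships on PyPI as the
immutables package, whose Map class follows this design; a sketch
assuming that package is installed:

    from immutables import Map   # the PEP's reference implementation

    m = Map(foo='bar')                         # mirrors frozenmap(**kwargs)
    assert m['foo'] == 'bar'
    assert m.get('baz', 'missing') == 'missing'
    assert 'foo' in m and len(m) == 1

    # Immutable and hashable, so usable as a dict key or set element:
    cache = {m: 'cached result'}
    assert cache[Map(foo='bar')] == 'cached result'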
Mutation

frozenmap instances are immutable. That said, it is possible to
efficiently produce mutated copies of the immutable instance.

The complexity of mutation operations is O(log N) and the resulting
frozenmap copies often consume very little additional memory due to the
use of structural sharing (read [3] for more details).

frozenmap.including(key, value)

The method creates a new frozenmap copy with a new key/value pair:

    m = frozenmap(foo=1)
    m2 = m.including('bar', 100)

    print(m)    # will print frozenmap({'foo': 1})
    print(m2)   # will print frozenmap({'foo': 1, 'bar': 100})

frozenmap.excluding(key)

The method produces a copy of the frozenmap which does not include a
deleted key:

    m = frozenmap(foo=1, bar=100)

    m2 = m.excluding('foo')

    print(m)    # will print frozenmap({'foo': 1, 'bar': 100})
    print(m2)   # will print frozenmap({'bar': 100})

    m3 = m.excluding('spam')   # will throw a KeyError('spam')

frozenmap.union(mapping=None, **kw)

The method produces a copy of the frozenmap and adds or modifies
multiple key/values for the created copy. The signature of the method
matches the signature of the frozenmap constructor:

    m = frozenmap(foo=1)

    m2 = m.union({'spam': 'ham'})
    print(m2)   # will print frozenmap({'foo': 1, 'spam': 'ham'})

    m3 = m.union(foo=100, y=2)
    print(m3)   # will print frozenmap({'foo': 100, 'y': 2})

    print(m)    # will print frozenmap({'foo': 1})

Calling the union() method to add/replace N keys is more efficient than
calling the including() method N times.

frozenmap.mutating()

The method allows efficient copying of a frozenmap instance with
multiple modifications applied. This method is especially useful when
the frozenmap in question contains thousands of key/value pairs and
there's a need to update many of them in a performance-critical section
of the code.

The frozenmap.mutating() method returns a mutable dict-like copy of the
frozenmap object: an instance of collections.FrozenMapCopy.

The FrozenMapCopy objects:

- are copy-on-write views of the data of the frozenmap instances they
  were created from;
- are mutable, although any mutations on them do not affect the
  frozenmap instances they were created from;
- can be passed to the frozenmap constructor; creating a frozenmap
  from a FrozenMapCopy object is an O(1) operation;
- have O(log N) complexity for get/set operations; creating them is an
  O(1) operation;
- have a FrozenMapCopy.close() method that prevents any further
  access/mutation of the data;
- can be used as a context manager.

The example below illustrates how mutating() can be used with a context
manager:

    numbers = frozenmap((i, i ** 2) for i in range(1_000_000))

    with numbers.mutating() as copy:
        for i in numbers:
            if not (numbers[i] % 997):
                del copy[i]

        numbers_without_997_multiples = frozenmap(copy)

        # at this point, *numbers* still has 1_000_000 key/values, and
        # *numbers_without_997_multiples* is a copy of *numbers* without
        # values that are multiples of 997.

        for i in numbers:
            if not (numbers[i] % 593):
                del copy[i]

        numbers_without_593_multiples = frozenmap(copy)

        print(copy[10])   # will print 100.

    print(copy[10])   # This will throw a ValueError as *copy*
                      # has been closed when the "with" block
                      # was executed.
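The reference implementation spells this mutation API slightly
differently (set()/delete()/mutate() rather than
including()/excluding()/mutating()); a hedged sketch, again assuming
the immutables package:

    from immutables import Map

    m = Map(foo=1, bar=100)
    m2 = m.set('spam', 'ham')     # analogue of frozenmap.including()
    m3 = m.delete('foo')          # analogue of frozenmap.excluding()
    assert 'spam' not in m        # m itself is untouched
    assert dict(m3) == {'bar': 100}

    with m.mutate() as mm:        # analogue of frozenmap.mutating()
        mm['baz'] = 2
        del mm['bar']
        m4 = mm.finish()          # snapshot back into an immutable Map
    assert dict(m4) == {'foo': 1, 'baz': 2}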
Iteration

Because frozenmap implements the standard collections.abc.Mapping
protocol, all expected methods of iteration are supported:

    assert list(m) == ['foo']
    assert list(m.items()) == [('foo', 'bar')]
    assert list(m.keys()) == ['foo']
    assert list(m.values()) == ['bar']

Iteration in frozenmap, unlike in dict, does not preserve the insertion
order.

Hashing

frozenmap instances can be hashable just like tuple objects:

    hash(frozenmap(foo='bar'))   # works
    hash(frozenmap(foo=[]))      # will throw an error

Typing

It is possible to use the standard typing notation for frozenmaps:

    m: frozenmap[str, int] = frozenmap()

Implementation

The proposed frozenmap immutable type uses a Hash Array Mapped Trie
(HAMT) data structure. Functional programming languages, like Clojure,
use HAMT to efficiently implement immutable hash tables, vectors, and
sets.

HAMT

The key design contract of HAMT is the guarantee of a predictable value
when given the hash of a key. For a pair of key and value, the hash of
the key can be used to determine the location of value in the hash map
tree.

Immutable mappings implemented with HAMT have O(log N) performance for
set() and get() operations. This efficiency is possible because mutation
operations only affect one branch of the tree, making it possible to
reuse non-mutated branches, and, therefore, avoiding copying of
unmodified data.

Read more about HAMT in [4]. The CPython implementation [5] has a fairly
detailed description of the algorithm as well.

Performance

[Figure 1: benchmark of copy/insert cost for dict.copy() versus the
HAMT-based immutable mapping across dictionary sizes.]

The above chart demonstrates that:

- frozenmap implemented with HAMT displays near O(1) performance for
  all benchmarked dictionary sizes.
- dict.copy() becomes less efficient when using around 100-200 items.

[Figure 2: benchmark of lookup cost for dict versus the HAMT-based
immutable mapping.]

Figure 2 compares the lookup costs of dict versus a HAMT-based immutable
mapping. HAMT lookup time is ~30% slower than Python dict lookups on
average. This performance difference exists since traversing a shallow
tree is less efficient than lookup in a flat continuous array.

Further to that, quoting [6]: "[using HAMT] means that in practice while
insertions, deletions, and lookups into a persistent hash array mapped
trie have a computational complexity of O(log n), for most applications
they are effectively constant time, as it would require an extremely
large number of entries to make any operation take more than a dozen
steps."

Design Considerations

Why "frozenmap" and not "FrozenMap"

The lower-case "frozenmap" resonates well with the frozenset built-in as
well as with types like collections.defaultdict.

Why "frozenmap" and not "frozendict"

"Dict" has a very specific meaning in Python:

- a dict is a concrete implementation of abc.MutableMapping with O(1)
  get and set operations (frozenmap has O(log N) complexity);
- Python dicts preserve insertion order.

The proposed frozenmap does not have these mentioned properties.
Instead, frozenmap has an O(log N) cost of set/get operations, and it
only implements the abc.Mapping protocol.
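To make the HAMT traversal described above concrete: each level of the
tree consumes a few bits of the key's hash to pick one of (typically)
32 child slots. A toy sketch of that path computation, not CPython's
hamt.c:

    def hamt_path(key, levels=4):
        # Yield the child slot (0-31) chosen at each level of a toy
        # HAMT that consumes 5 hash bits per level.
        h = hash(key)
        for level in range(levels):
            yield (h >> (5 * level)) & 0b11111

    # Distinct keys almost always diverge within a few levels, so a
    # lookup touches only O(log32 N) nodes.
    print(list(hamt_path('foo')))
    print(list(hamt_path('bar')))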
Implementation

The full implementation of the proposed frozenmap type is available
at [7]. The package includes C and pure Python implementations of the
type.

See also the HAMT collection implementation as part of the CPython
project tree here: [8].

Acknowledgments

I thank Carol Willing, Łukasz Langa, Larry Hastings, and Guido van
Rossum for their feedback, ideas, edits, and discussions around this
PEP.

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.

References

[1] https://en.wikipedia.org/wiki/Persistent_data_structure
[2] https://github.com/python/cpython/blob/3.8/Python/hamt.c
[3] https://en.wikipedia.org/wiki/Persistent_data_structure#Trees
[4] https://en.wikipedia.org/wiki/Hash_array_mapped_trie#cite_note-bagwell-1
[5] https://github.com/python/cpython/blob/3.8/Python/hamt.c
[6] https://en.wikipedia.org/wiki/Persistent_data_structure#Trees
[7] https://github.com/MagicStack/immutables
[8] https://github.com/python/cpython/blob/3.8/Python/hamt.c


PEP: 285
Title: Adding a bool type
Author: Guido van Rossum
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 08-Mar-2002
Python-Version: 2.3
Post-History: 08-Mar-2002, 30-Mar-2002, 03-Apr-2002

Abstract

This PEP proposes the introduction of a new built-in type, bool, with
two constants, False and True. The bool type would be a straightforward
subtype (in C) of the int type, and the values False and True would
behave like 0 and 1 in most respects (for example, False==0 and True==1
would be true) except repr() and str(). All built-in operations that
conceptually return a Boolean result will be changed to return False or
True instead of 0 or 1; for example, comparisons, the "not" operator,
and predicates like isinstance().

Review

I've collected enough feedback to last me a lifetime, so I declare the
review period officially OVER. I had Chinese food today; my fortune
cookie said "Strong and bitter words indicate a weak cause." It reminded
me of some of the posts against this PEP... :-)

Anyway, here are my BDFL pronouncements. (Executive summary: I'm not
changing a thing; all variants are rejected.)

1) Should this PEP be accepted?

   => Yes.

   There have been many arguments against the PEP. Many of them were
   based on misunderstandings. I've tried to clarify some of the most
   common misunderstandings below in the main text of the PEP. The only
   issue that weighs at all for me is the tendency of newbies to write
   "if x == True" where "if x" would suffice. More about that below
   too. I think this is not a sufficient reason to reject the PEP.

2) Should str(True) return "True" or "1"? "1" might reduce backwards
   compatibility problems, but looks strange.
   (repr(True) would always return "True".)

   => "True".

   Almost all reviewers agree with this.

3) Should the constants be called 'True' and 'False' (similar to None)
   or 'true' and 'false' (as in C++, Java and C99)?

   => True and False.

   Most reviewers agree that consistency within Python is more
   important than consistency with other languages.

4) Should we strive to eliminate non-Boolean operations on bools in the
   future, through suitable warnings, so that for example True+1 would
   eventually (in Python 3000) be illegal?

   => No.

   There's a small but vocal minority that would prefer to see
   "textbook" bools that don't support arithmetic operations at all,
   but most reviewers agree with me that bools should always allow
   arithmetic operations.

5) Should operator.truth(x) return an int or a bool?

   => bool.

   Tim Peters believes it should return an int, but almost all other
   reviewers agree that it should return a bool. My rationale:
   operator.truth() exists to force a Boolean context on its argument
   (it calls the C API PyObject_IsTrue()). Whether the outcome is
   reported as int or bool is secondary; if bool exists there's no
   reason not to use it. (Under the PEP, operator.truth() now becomes
   an alias for bool(); that's fine.)

6) Should bool inherit from int?

   => Yes.

   In an ideal world, bool might be better implemented as a separate
   integer type that knows how to perform mixed-mode arithmetic.
   However, inheriting bool from int eases the implementation
   enormously (in part since all C code that calls PyInt_Check() will
   continue to work -- this returns true for subclasses of int). Also,
   I believe this is right in terms of substitutability: code that
   requires an int can be fed a bool and it will behave the same as 0
   or 1. Code that requires a bool may not work when it is given an
   int; for example, 3 & 4 is 0, but both 3 and 4 are true when
   considered as truth values.

7) Should the name 'bool' be changed?

   => No.

   Some reviewers have argued for boolean instead of bool, because this
   would be easier to understand (novices may have heard of Boolean
   algebra but may not make the connection with bool) or because they
   hate abbreviations. My take: Python uses abbreviations judiciously
   (like 'def', 'int', 'dict') and I don't think these are a burden to
   understanding. To a newbie, it doesn't matter whether it's called a
   waffle or a bool; it's a new word, and they learn quickly what it
   means.

   One reviewer has argued to make the name 'truth'. I find this an
   unattractive name, and would actually prefer to reserve this term
   (in documentation) for the more abstract concept of truth values
   that already exists in Python. For example: "when a container is
   interpreted as a truth value, an empty container is considered false
   and a non-empty one is considered true."

8) Should we strive to require that Boolean operations (like "if",
   "and", "not") have a bool as an argument in the future, so that for
   example "if []:" would become illegal and would have to be written
   as "if bool([]):" ???

   => No!!!

   Some people believe that this is how a language with a textbook
   Boolean type should behave. Because it was brought up, others have
   worried that I might agree with this position. Let me make my
   position on this quite clear. This is not part of the PEP's
   motivation and I don't intend to make this change.
(See also the section "Clarification" below.)

Rationale

Most languages eventually grow a Boolean type; even C99 (the new and
improved C standard, not yet widely adopted) has one.

Many programmers apparently feel the need for a Boolean type; most
Python documentation contains a bit of an apology for the absence of a
Boolean type. I've seen lots of modules that defined constants "False=0"
and "True=1" (or similar) at the top and used those. The problem with
this is that everybody does it differently. For example, should you use
"FALSE", "false", "False", "F" or even "f"? And should false be the
value zero or None, or perhaps a truth value of a different type that
will print as "true" or "false"? Adding a standard bool type to the
language resolves those issues.

Some external libraries (like databases and RPC packages) need to be
able to distinguish between Boolean and integral values, and while it's
usually possible to craft a solution, it would be easier if the language
offered a standard Boolean type. This also applies to Jython: some Java
classes have separately overloaded methods or constructors for int and
boolean arguments. The bool type can be used to select the boolean
variant. (The same is apparently the case for some COM interfaces.)

The standard bool type can also serve as a way to force a value to be
interpreted as a Boolean, which can be used to normalize Boolean values.
When a Boolean value needs to be normalized to one of two values,
bool(x) is much clearer than "not not x" and much more concise than

    if x:
        return 1
    else:
        return 0

Here are some arguments derived from teaching Python. When showing
people comparison operators etc. in the interactive shell, I think this
is a bit ugly:

    >>> a = 13
    >>> b = 12
    >>> a > b
    1
    >>>

If this was:

    >>> a > b
    True
    >>>

it would require a millisecond less thinking each time a 0 or 1 was
printed.

There's also the issue (which I've seen baffling even experienced
Pythonistas who had been away from the language for a while) that if you
see:

    >>> cmp(a, b)
    1
    >>> cmp(a, a)
    0
    >>>

you might be tempted to believe that cmp() also returned a truth value,
whereas in reality it can return three different values (-1, 0, 1). If
ints were not (normally) used to represent Boolean results, this would
stand out much more clearly as something completely different.

Specification

The following Python code specifies most of the properties of the new
type:

    class bool(int):

        def __new__(cls, val=0):
            # This constructor always returns an existing instance
            if val:
                return True
            else:
                return False

        def __repr__(self):
            if self:
                return "True"
            else:
                return "False"

        __str__ = __repr__

        def __and__(self, other):
            if isinstance(other, bool):
                return bool(int(self) & int(other))
            else:
                return int.__and__(self, other)

        __rand__ = __and__

        def __or__(self, other):
            if isinstance(other, bool):
                return bool(int(self) | int(other))
            else:
                return int.__or__(self, other)

        __ror__ = __or__

        def __xor__(self, other):
            if isinstance(other, bool):
                return bool(int(self) ^ int(other))
            else:
                return int.__xor__(self, other)

        __rxor__ = __xor__

    # Bootstrap truth values through sheer willpower
    False = int.__new__(bool, 0)
    True = int.__new__(bool, 1)

The values False and True will be singletons, like None. Because the
type has two values, perhaps these should be called "doubletons"?
The real implementation will not allow other instances of bool to be
created.

True and False will properly round-trip through pickling and
marshalling; for example pickle.loads(pickle.dumps(True)) will return
True, and so will marshal.loads(marshal.dumps(True)).

All built-in operations that are defined to return a Boolean result will
be changed to return False or True instead of 0 or 1. In particular,
this affects comparisons (<, <=, ==, !=, >, >=, is, is not, in, not in),
the unary operator 'not', the built-in functions callable(), hasattr(),
isinstance() and issubclass(), the dict method has_key(), the string and
unicode methods endswith(), isalnum(), isalpha(), isdigit(), islower(),
isspace(), istitle(), isupper(), and startswith(), the unicode methods
isdecimal() and isnumeric(), and the 'closed' attribute of file objects.
The predicates in the operator module are also changed to return a bool,
including operator.truth().

Because bool inherits from int, True+1 is valid and equals 2, and so on.
This is important for backwards compatibility: because comparisons and
so on currently return integer values, there's no way of telling what
uses existing applications make of these values.

It is expected that over time, the standard library will be updated to
use False and True when appropriate (but not to require a bool argument
type where previously an int was allowed). This change should not pose
additional problems and is not specified in detail by this PEP.

C API

The header file "boolobject.h" defines the C API for the bool type. It
is included by "Python.h" so there is no need to include it directly.

The existing names Py_False and Py_True reference the unique bool
objects False and True (previously these referenced static int objects
with values 0 and 1, which were not unique amongst int values).

A new API, PyObject *PyBool_FromLong(long), takes a C long int argument
and returns a new reference to either Py_False (when the argument is
zero) or Py_True (when it is nonzero).

To check whether an object is a bool, the macro PyBool_Check() can be
used.

The type of bool instances is PyBoolObject *.

The bool type object is available as PyBool_Type.

Clarification

This PEP does not change the fact that almost all object types can be
used as truth values. For example, when used in an if statement, an
empty list is false and a non-empty one is true; this does not change
and there is no plan to ever change this.

The only thing that changes is the preferred values to represent truth
values when returned or assigned explicitly. Previously, these preferred
truth values were 0 and 1; the PEP changes the preferred values to False
and True, and changes built-in operations to return these preferred
values.

Compatibility

Because of backwards compatibility, the bool type lacks many properties
that some would like to see. For example, arithmetic operations with one
or two bool arguments are allowed, treating False as 0 and True as 1.
Also, a bool may be used as a sequence index.

I don't see this as a problem, and I don't want to evolve the language
in this direction either. I don't believe that a stricter interpretation
of "Booleanness" makes the language any clearer.

Another consequence of the compatibility requirement is that the
expression "True and 6" has the value 6, and similarly the expression
"False or None" has the value None. The "and" and "or" operators are
usefully defined to return the first argument that determines the
outcome, and this won't change; in particular, they don't force the
outcome to be a bool. Of course, if both arguments are bools, the
outcome is always a bool. It can also easily be coerced into being a
bool by writing for example "bool(x and y)".
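These rules are easy to check interactively:

    assert (True and 6) == 6          # "and" returns the deciding operand
    assert (False or None) is None    # ...and so does "or"
    assert bool(True and 6) is True   # coerce when a real bool is needed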
Resolved Issues

(See also the Review section above.)

- Because the repr() or str() of a bool value is different from an int
  value, some code (for example doctest-based unit tests, and possibly
  database code that relies on things like "%s" % truth) may fail. It
  is easy to work around this (without explicitly referencing the bool
  type), and it is expected that this only affects a very small amount
  of code that can easily be fixed.

- Other languages (C99, C++, Java) name the constants "false" and
  "true", in all lowercase. For Python, I prefer to stick with the
  example set by the existing built-in constants, which all use
  CapitalizedWords: None, Ellipsis, NotImplemented (as well as all
  built-in exceptions). Python's built-in namespace uses all lowercase
  for functions and types only.

- It has been suggested that, in order to satisfy user expectations,
  for every x that is considered true in a Boolean context, the
  expression x == True should be true, and likewise if x is considered
  false, x == False should be true. In particular newbies who have
  only just learned about Boolean variables are likely to write:

      if x == True: ...

  instead of the correct form:

      if x: ...

  There seem to be strong psychological and linguistic reasons why
  many people are at first uncomfortable with the latter form, but I
  believe that the solution should be in education rather than in
  crippling the language. After all, == is generally seen as a
  transitive operator, meaning that from a==b and b==c we can deduce
  a==c. But if any comparison to True were to report equality when the
  other operand was a true value of any type, atrocities like
  6==True==7 would hold true, from which one could infer the falsehood
  6==7. That's unacceptable. (In addition, it would break backwards
  compatibility. But even if it didn't, I'd still be against this, for
  the stated reasons.)

  Newbies should also be reminded that there's never a reason to write:

      if bool(x): ...

  since the bool is implicit in the "if". Explicit is not better than
  implicit here, since the added verbiage impairs readability and
  there's no other interpretation possible. There is, however,
  sometimes a reason to write:

      b = bool(x)

  This is useful when it is unattractive to keep a reference to an
  arbitrary object x, or when normalization is required for some other
  reason. It is also sometimes appropriate to write:

      i = int(bool(x))

  which converts the bool to an int with the value 0 or 1. This
  conveys the intention to henceforth use the value as an int.
Implementation

A complete implementation in C has been uploaded to the SourceForge
patch manager: https://bugs.python.org/issue528022

This will soon be checked into CVS for python 2.3a0.

Copyright

This document has been placed in the public domain.


PEP: 373
Title: Python 2.7 Release Schedule
Version: $Revision$
Last-Modified: $Date$
Author: Benjamin Peterson
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 03-Nov-2008
Python-Version: 2.7

Abstract

This document describes the development and release schedule for Python
2.7.

Python 2.7 is the end of the Python 2.x series, and is succeeded by
Python 3. See the "Sunsetting Python 2" FAQ on python.org for a general
overview.

Update (April 2014)

The End Of Life date (EOL, sunset date) for Python 2.7 has been moved
five years into the future, to 2020. This decision was made to clarify
the status of Python 2.7 and relieve worries for those users who cannot
yet migrate to Python 3. See also PEP 466.

This declaration does not guarantee that bugfix releases will be made on
a regular basis, but it should enable volunteers who want to contribute
bugfixes for Python 2.7 and it should satisfy vendors who still have to
support Python 2 for years to come.

There will be no Python 2.8 (see PEP 404).

Release Manager and Crew

    Position              Name
    --------------------- -------------------
    2.7 Release Manager   Benjamin Peterson
    Windows installers    Steve Dower
    Mac installers        Ned Deily

Maintenance releases

Being the last of the 2.x series, 2.7 received bugfix support until
2020. Support officially stopped January 1 2020, and 2.7.18 code freeze
occurred on January 1 2020, but the final release occurred after that
date.

Dates of previous maintenance releases:

- 2.7.1 2010-11-27
- 2.7.2 2011-07-21
- 2.7.3rc1 2012-02-23
- 2.7.3rc2 2012-03-15
- 2.7.3 2012-04-09
- 2.7.4rc1 2013-03-23
- 2.7.4 2013-04-06
- 2.7.5 2013-05-12
- 2.7.6rc1 2013-10-26
- 2.7.6 2013-11-10
- 2.7.7rc1 2014-05-17
- 2.7.7 2014-05-31
- 2.7.8 2014-06-30
- 2.7.9rc1 2014-11-26
- 2.7.9 2014-12-10
- 2.7.10rc1 2015-05-09
- 2.7.10 2015-05-23
- 2.7.11rc1 2015-11-21
- 2.7.11 2015-12-05
- 2.7.12 2016-06-25
- 2.7.13rc1 2016-12-03
- 2.7.13 2016-12-17
- 2.7.14rc1 2017-08-26
- 2.7.14 2017-09-16
- 2.7.15rc1 2018-04-14
- 2.7.15 2018-05-01
- 2.7.16rc 2019-02-16
- 2.7.16 2019-03-02
- 2.7.17rc1 2019-10-05
- 2.7.17 2019-10-19
- 2.7.18rc1 2020-04-04
- 2.7.18 2020-04-20

2.7.0 Release Schedule

The release schedule for 2.7.0 was:

- 2.7 alpha 1 2009-12-05
- 2.7 alpha 2 2010-01-09
- 2.7 alpha 3 2010-02-06
- 2.7 alpha 4 2010-03-06
- 2.7 beta 1 2010-04-03
- 2.7 beta 2 2010-05-08
- 2.7 rc1 2010-06-05
- 2.7 rc2 2010-06-19
- 2.7 final 2010-07-03

Possible features for 2.7

Nothing here. [Note that a moratorium on core language changes is in
effect.]
References

- "The Python 2 death march" on python-dev
- Petition: abandon plans to ship a 2.7.18 in April
- [RELEASE] Python 2.7.18, the end of an era

Copyright

This document has been placed in the public domain.


PEP: 319
Title: Python Synchronize/Asynchronize Block
Version: $Revision$
Last-Modified: $Date$
Author: Michel Pelletier
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 24-Feb-2003
Python-Version: 2.4
Post-History:

Abstract

This PEP proposes adding two new keywords to Python, 'synchronize' and
'asynchronize'.

Pronouncement

This PEP is rejected in favor of PEP 343.

The 'synchronize' Keyword

The concept of code synchronization in Python is too low-level. To
synchronize code a programmer must be aware of the details of the
following pseudo-code pattern:

    initialize_lock()

    ...

    acquire_lock()
    try:
        change_shared_data()
    finally:
        release_lock()

This synchronized block pattern is not the only pattern (more discussed
below) but it is very common. This PEP proposes replacing the above code
with the following equivalent:

    synchronize:
        change_shared_data()

The advantages of this scheme are simpler syntax and less room for user
error. Currently users are required to write code about acquiring and
releasing thread locks in 'try/finally' blocks; errors in this code can
cause notoriously difficult concurrent thread locking issues.

The 'asynchronize' Keyword

While executing a 'synchronize' block of code a programmer may want to
"drop back" to running asynchronously momentarily to run blocking
input/output routines or something else that might take an
indeterminate amount of time and does not require synchronization. This
code usually follows the pattern:

    initialize_lock()

    ...

    acquire_lock()
    try:
        change_shared_data()
        release_lock()      # become async
        do_blocking_io()
        acquire_lock()      # sync again
        change_shared_data2()

    finally:
        release_lock()

The asynchronous section of the code is not very obvious visually, so
it is marked up with comments. Using the proposed 'asynchronize'
keyword this code becomes much cleaner, easier to understand, and less
prone to error:

    synchronize:
        change_shared_data()

        asynchronize:
            do_blocking_io()

        change_shared_data2()

Encountering an 'asynchronize' keyword inside a non-synchronized block
can raise either an error or issue a warning (as all code blocks are
implicitly asynchronous anyway). It is important to note
It is important to note\n that the above example is not the same as:\n\n synchronize:\n change_shared_data()\n\n do_blocking_io()\n\n synchronize:\n change_shared_data2()\n\n Because both synchronized blocks of code may be running inside the\n same iteration of a loop, Consider:\n\n while in_main_loop():\n synchronize:\n change_shared_data()\n\n asynchronize:\n do_blocking_io()\n\n change_shared_data2()\n\n Many threads may be looping through this code. Without the\n 'asynchronize' keyword one thread cannot stay in the loop and\n release the lock at the same time while blocking IO is going on.\n This pattern of releasing locks inside a main loop to do blocking IO\n is used extensively inside the CPython interpreter itself.\n\nSynchronization Targets\n\nAs proposed the 'synchronize' and 'asynchronize' keywords synchronize a\nblock of code. However programmers may want to specify a target object\nthat threads synchronize on. Any object can be a synchronization target.\n\nConsider a two-way queue object: two different objects are used by the\nsame 'synchronize' code block to synchronize both queues separately in\nthe 'get' method:\n\n class TwoWayQueue:\n def __init__(self):\n self.front = []\n self.rear = []\n\n def putFront(self, item):\n self.put(item, self.front)\n\n def getFront(self):\n item = self.get(self.front)\n return item\n\n def putRear(self, item):\n self.put(item, self.rear)\n\n def getRear(self):\n item = self.get(self.rear)\n return item\n\n def put(self, item, queue):\n synchronize queue:\n queue.append(item)\n\n def get(self, queue):\n synchronize queue:\n item = queue[0]\n del queue[0]\n return item\n\nHere is the equivalent code in Python as it is now without a\n'synchronize' keyword:\n\n import thread\n\n class LockableQueue:\n\n def __init__(self):\n self.queue = []\n self.lock = thread.allocate_lock()\n\n class TwoWayQueue:\n def __init__(self):\n self.front = LockableQueue()\n self.rear = LockableQueue()\n\n def putFront(self, item):\n self.put(item, self.front)\n\n def getFront(self):\n item = self.get(self.front)\n return item\n\n def putRear(self, item):\n self.put(item, self.rear)\n\n def getRear(self):\n item = self.get(self.rear)\n return item\n\n def put(self, item, queue):\n queue.lock.acquire()\n try:\n queue.append(item)\n finally:\n queue.lock.release()\n\n def get(self, queue):\n queue.lock.acquire()\n try:\n item = queue[0]\n del queue[0]\n return item\n finally:\n queue.lock.release()\n\nThe last example had to define an extra class to associate a lock with\nthe queue where the first example the 'synchronize' keyword does this\nassociation internally and transparently.\n\nOther Patterns that Synchronize\n\nThere are some situations where the 'synchronize' and 'asynchronize'\nkeywords cannot entirely replace the use of lock methods like acquire\nand release. 
Some examples are if the programmer wants to provide arguments for
acquire, or if a lock is acquired in one code block but released in
another, as shown below.

Here is a class from Zope modified to use both the 'synchronize' and
'asynchronize' keywords, which also uses a pool of explicit locks that
are acquired and released in different code blocks and thus don't use
'synchronize':

    import thread
    from ZServerPublisher import ZServerPublisher

    class ZRendevous:

        def __init__(self, n=1):
            pool=[]
            self._lists=pool, [], []

            synchronize:
                while n > 0:
                    l=thread.allocate_lock()
                    l.acquire()
                    pool.append(l)
                    thread.start_new_thread(ZServerPublisher,
                                            (self.accept,))
                    n=n-1

        def accept(self):
            synchronize:
                pool, requests, ready = self._lists
                while not requests:
                    l=pool[-1]
                    del pool[-1]
                    ready.append(l)

                    asynchronize:
                        l.acquire()

                    pool.append(l)

                r=requests[0]
                del requests[0]
                return r

        def handle(self, name, request, response):
            synchronize:
                pool, requests, ready = self._lists
                requests.append((name, request, response))
                if ready:
                    l=ready[-1]
                    del ready[-1]
                    l.release()

Here is the original class as found in the
'Zope/ZServer/PubCore/ZRendevous.py' module. The "convenience" of the
'_a' and '_r' shortcut names obscures the code:

    import thread
    from ZServerPublisher import ZServerPublisher

    class ZRendevous:

        def __init__(self, n=1):
            sync=thread.allocate_lock()
            self._a=sync.acquire
            self._r=sync.release
            pool=[]
            self._lists=pool, [], []
            self._a()
            try:
                while n > 0:
                    l=thread.allocate_lock()
                    l.acquire()
                    pool.append(l)
                    thread.start_new_thread(ZServerPublisher,
                                            (self.accept,))
                    n=n-1
            finally: self._r()

        def accept(self):
            self._a()
            try:
                pool, requests, ready = self._lists
                while not requests:
                    l=pool[-1]
                    del pool[-1]
                    ready.append(l)
                    self._r()
                    l.acquire()
                    self._a()
                    pool.append(l)

                r=requests[0]
                del requests[0]
                return r
            finally: self._r()

        def handle(self, name, request, response):
            self._a()
            try:
                pool, requests, ready = self._lists
                requests.append((name, request, response))
                if ready:
                    l=ready[-1]
                    del ready[-1]
                    l.release()
            finally: self._r()

In particular the asynchronize section of the accept method is not very
obvious. To beginner programmers, 'synchronize' and 'asynchronize'
remove many of the problems encountered when juggling multiple acquire
and release methods on different locks in different try/finally blocks.

Formal Syntax

Python syntax is defined in a modified BNF grammar notation described in
the Python Language Reference [1]. This section describes the proposed
synchronization syntax using this grammar:

    synchronize_stmt:  'synchronize' [test] ':' suite
    asynchronize_stmt: 'asynchronize' [test] ':' suite
    compound_stmt: ... | synchronize_stmt | asynchronize_stmt

(The '...' indicates other compound statements elided.)

Proposed Implementation

The author of this PEP has not explored an implementation yet. There are
several implementation issues that must be resolved. The main
implementation issue is what exactly gets locked and unlocked during a
synchronized block.

During an unqualified synchronized block (the use of the 'synchronize'
keyword without a target argument) a lock could be created and
associated with the synchronized code block object.
Any threads that are to execute the block must first acquire the code
block lock.

When an 'asynchronize' keyword is encountered in a 'synchronize' block
the code block lock is unlocked before the inner block is executed and
re-locked when the inner block terminates.

When a synchronized block target is specified, the object is associated
with a lock. How this is implemented cleanly is probably the highest
risk of this proposal. Java Virtual Machines typically associate a
special hidden lock object with the target object and use it to
synchronize the block around the target only.

Backward Compatibility

Backward compatibility is solved with the new from __future__ Python
syntax (PEP 236), and the new warning framework (PEP 230) to evolve the
Python language into phasing out any conflicting names that use the new
keywords 'synchronize' and 'asynchronize'. To use the syntax now, a
developer could use the statement:

    from __future__ import threadsync  # or whatever

In addition, any code that uses the keyword 'synchronize' or
'asynchronize' as an identifier will be issued a warning from Python.
After the appropriate period of time, the syntax would become standard,
the above import statement would do nothing, and any identifiers named
'synchronize' or 'asynchronize' would raise an exception.

PEP 310 Reliable Acquisition/Release Pairs

PEP 310 proposes the 'with' keyword that can serve the same function as
'synchronize' (but no facility for 'asynchronize'). The pattern:

    initialize_lock()

    with the_lock:
        change_shared_data()

is equivalent to the proposed:

    synchronize the_lock:
        change_shared_data()

PEP 310 must synchronize on an existing lock, while this PEP proposes
that unqualified 'synchronize' statements synchronize on a global,
internal, transparent lock in addition to qualified 'synchronize'
statements. The 'with' statement also requires lock initialization,
while the 'synchronize' statement can synchronize on any target object
including locks.

While limited in this fashion, the 'with' statement is more abstract and
serves more purposes than synchronization. For example, transactions
could be used with the 'with' keyword:

    initialize_transaction()

    with my_transaction:
        do_in_transaction()

    # when the block terminates, the transaction is committed.

The 'synchronize' and 'asynchronize' keywords cannot serve this or any
other general acquire/release pattern other than thread synchronization.

How Java Does It

Java defines a 'synchronized' keyword (note the grammatical tense
difference between the Java keyword and this PEP's 'synchronize') which
must be qualified on any object. The syntax is:

    synchronized (Expression) Block

Expression must yield a valid object (null raises an error and
exceptions during 'Expression' terminate the 'synchronized' block for
the same reason) upon which 'Block' is synchronized.

How Jython Does It

Jython uses a 'synchronize' class with the static method
'make_synchronized' that accepts one callable argument and returns a
newly created, synchronized, callable "wrapper" around the argument.

Summary of Proposed Changes to Python

Adding new 'synchronize' and 'asynchronize' keywords to the language.
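The Jython approach described above is easy to model in today's Python
with a lock-holding wrapper; a sketch (the helper name merely echoes
Jython's, it is not a real API):

    import functools
    import threading

    def make_synchronized(func):
        # Serialize all calls to *func* behind one hidden lock.
        lock = threading.Lock()

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with lock:
                return func(*args, **kwargs)
        return wrapper

    @make_synchronized
    def change_shared_data():
        pass   # stand-in for real shared-state mutation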
Risks

This PEP proposes adding two keywords to the Python language. This may
break code.

There is no implementation to test.

It's not the most important problem facing Python programmers today
(although it is a fairly notorious one).

The equivalent Java keyword is the past participle 'synchronized'. This
PEP proposes the present tense, 'synchronize', as being more in spirit
with Python (there being less distinction between compile-time and
run-time in Python than in Java).

Dissenting Opinion

This PEP has not been discussed on python-dev.

References

[1] The Python Language Reference http://docs.python.org/reference/

Copyright

This document has been placed in the public domain.


PEP: 693
Title: Python 3.12 Release Schedule
Author: Thomas Wouters
Status: Active
Type: Informational
Topic: Release
Created: 24-May-2022
Python-Version: 3.12

Abstract

This document describes the development and release schedule for Python
3.12.

Release Manager and Crew

- 3.12 Release Manager: Thomas Wouters
- Windows installers: Steve Dower
- Mac installers: Ned Deily
- Documentation: Julien Palard

Release Schedule

3.12.0 schedule

Note: the dates below use a 17-month development period that results in
a 12-month release cadence between feature versions, as defined by PEP
602.

Actual:

- 3.12 development begins: Sunday, 2022-05-08
- 3.12.0 alpha 1: Monday, 2022-10-24
- 3.12.0 alpha 2: Monday, 2022-11-14
- 3.12.0 alpha 3: Tuesday, 2022-12-06
- 3.12.0 alpha 4: Tuesday, 2023-01-10
- 3.12.0 alpha 5: Tuesday, 2023-02-07
- 3.12.0 alpha 6: Tuesday, 2023-03-07
- 3.12.0 alpha 7: Tuesday, 2023-04-04
- 3.12.0 beta 1: Monday, 2023-05-22 (No new features beyond this
  point.)
- 3.12.0 beta 2: Tuesday, 2023-06-06
- 3.12.0 beta 3: Monday, 2023-06-19
- 3.12.0 beta 4: Tuesday, 2023-07-11
- 3.12.0 candidate 1: Sunday, 2023-08-06
- 3.12.0 candidate 2: Wednesday, 2023-09-06
- 3.12.0 candidate 3: Tuesday, 2023-09-19
- 3.12.0 final: Monday, 2023-10-02

Bugfix releases

Actual:

- 3.12.1: Thursday, 2023-12-07
- 3.12.2: Tuesday, 2024-02-06
- 3.12.3: Tuesday, 2024-04-09
- 3.12.4: Thursday, 2024-06-06
- 3.12.5: Tuesday, 2024-08-06
- 3.12.6: Friday, 2024-09-06
- 3.12.7: Tuesday, 2024-10-01

Expected:

- 3.12.8: Tuesday, 2024-12-03
- 3.12.9: Tuesday, 2025-02-04
- 3.12.10: Tuesday, 2025-04-08

Source-only security fix releases

Provided irregularly on an as-needed basis until October 2028.
3.12 Lifespan

3.12 will receive bugfix updates approximately every 2 months for
approximately 18 months. Some time after the release of 3.13.0 final,
the ninth and final 3.12 bugfix update will be released. After that, it
is expected that security updates (source only) will be released until 5
years after the release of 3.12.0 final, so until approximately October
2028.

Features for 3.12

New features can be found in What’s New In Python 3.12.

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.


PEP: 348
Title: Exception Reorganization for Python 3.0
Version: $Revision$
Last-Modified: $Date$
Author: Brett Cannon
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Jul-2005
Post-History:

Note

This PEP has been rejected [1].

Abstract

Python, as of version 2.4, has 38 exceptions (including warnings) in the
built-in namespace in a rather shallow hierarchy. These classes have
come about over the years without a chance to learn from experience.
This PEP proposes doing a reorganization of the hierarchy for Python 3.0
when backwards-compatibility is not as much of an issue.

Along with this reorganization, adding a requirement that all objects
passed to a raise statement must inherit from a specific superclass is
proposed. This is to have guarantees about the basic interface of
exceptions and to further enhance the natural hierarchy of exceptions.

Lastly, bare except clauses will be changed to be semantically
equivalent to except Exception. Most people currently use bare except
clauses for this purpose, and with the exception hierarchy
reorganization this becomes a viable default.

Rationale For Wanting Change

Exceptions are a critical part of Python. While exceptions are
traditionally used to signal errors in a program, they have also grown
to be used for flow control for things such as iterators.

While their importance is great, there is a lack of structure to them.
This stems from the fact that any object can be raised as an exception.
Because of this you have no guarantee in terms of what kind of object
will be raised, destroying any possible hierarchy raised objects might
adhere to.

But exceptions do have a hierarchy, showing the severity of the
exception. The hierarchy also groups related exceptions together to
simplify catching them in except clauses. To allow people to be able to
rely on this hierarchy, a common superclass that all raised objects must
inherit from is being proposed. It also allows guarantees about the
interface to raised objects to be made (see PEP 344). A discussion about
all of this has occurred before on python-dev [2].

As bare except clauses stand now, they catch all exceptions. While this
can be handy, it is rather overreaching for the common case. Thanks to
having a required superclass, catching all exceptions is as easy as
catching just one specific exception. This allows bare except clauses to
be used for a more useful purpose. Once again, this has been discussed
on python-dev [3].
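The overreach described above is easy to demonstrate; this snippet runs
on any Python version:

    import sys

    try:
        sys.exit(0)      # asks the interpreter to shut down
    except:              # bare except: swallows SystemExit too
        pass

    print("still alive")   # reached, although an exit was requested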
Once again, this has been discussed\non python-dev[3].\n\nFinally, slight changes to the exception hierarchy will make it much\nmore reasonable in terms of structure. By minor rearranging exceptions\nthat should not typically be caught can be allowed to propagate to the\ntop of the execution stack, terminating the interpreter as intended.\n\nPhilosophy of Reorganization\n\nFor the reorganization of the hierarchy, there was a general philosophy\nfollowed that developed from discussion of earlier drafts of this\nPEP[4],[5], [6],[7], [8],[9]. First and foremost was to not break\nanything that works. This meant that renaming exceptions was out of the\nquestion unless the name was deemed severely bad. This also meant no\nremoval of exceptions unless they were viewed as truly misplaced. The\nintroduction of new exceptions were only done in situations where there\nmight be a use for catching a superclass of a category of exceptions.\nLastly, existing exceptions would have their inheritance tree changed\nonly if it was felt they were truly misplaced to begin with.\n\nFor all new exceptions, the proper suffix had to be chosen. For those\nthat signal an error, \"Error\" is to be used. If the exception is a\nwarning, then \"Warning\". \"Exception\" is to be used when none of the\nother suffixes are proper to use and no specific suffix is a better fit.\n\nAfter that it came down to choosing which exceptions should and should\nnot inherit from Exception. This was for the purpose of making bare\nexcept clauses more useful.\n\nLastly, the entire existing hierarchy had to inherit from the new\nexception meant to act as the required superclass for all exceptions to\ninherit from.\n\nNew Hierarchy\n\nNote\n\nExceptions flagged with \"stricter inheritance\" will no longer inherit\nfrom a certain class. A \"broader inheritance\" flag means a class has\nbeen added to the exception's inheritance tree. All comparisons are\nagainst the Python 2.4 exception hierarchy.\n\n+-- BaseException (new; broader inheritance for subclasses)\n\n \n\n +-- Exception\n\n +-- GeneratorExit (defined in PEP 342) +-- StandardError +--\n ArithmeticError +-- DivideByZeroError +-- FloatingPointError +--\n OverflowError +-- AssertionError +-- AttributeError +--\n EnvironmentError +-- IOError +-- EOFError +-- OSError +--\n ImportError +-- LookupError +-- IndexError +-- KeyError +--\n MemoryError +-- NameError +-- UnboundLocalError +--\n NotImplementedError (stricter inheritance) +-- SyntaxError +--\n IndentationError +-- TabError +-- TypeError +-- RuntimeError +--\n UnicodeError +-- UnicodeDecodeError +-- UnicodeEncodeError +--\n UnicodeTranslateError +-- ValueError +-- ReferenceError +--\n StopIteration +-- SystemError +-- Warning +-- DeprecationWarning\n +-- FutureWarning +-- PendingDeprecationWarning +--\n RuntimeWarning +-- SyntaxWarning +-- UserWarning + --\n WindowsError\n\n +-- KeyboardInterrupt (stricter inheritance) +-- SystemExit\n (stricter inheritance)\n\nDifferences Compared to Python 2.4\n\nA more thorough explanation of terms is needed when discussing\ninheritance changes. Inheritance changes result in either broader or\nmore restrictive inheritance. \"Broader\" is when a class has an\ninheritance tree like cls, A and then becomes cls, B, A. \"Stricter\" is\nthe reverse.\n\nBaseException\n\nThe superclass that all exceptions must inherit from. It's name was\nchosen to reflect that it is at the base of the exception hierarchy\nwhile being an exception itself. 
\"Raisable\" was considered as a name, it\nwas passed on because its name did not properly reflect the fact that it\nis an exception itself.\n\nDirect inheritance of BaseException is not expected, and will be\ndiscouraged for the general case. Most user-defined exceptions should\ninherit from Exception instead. This allows catching Exception to\ncontinue to work in the common case of catching all exceptions that\nshould be caught. Direct inheritance of BaseException should only be\ndone in cases where an entirely new category of exception is desired.\n\nBut, for cases where all exceptions should be caught blindly,\nexcept BaseException will work.\n\nKeyboardInterrupt and SystemExit\n\nBoth exceptions are no longer under Exception. This is to allow bare\nexcept clauses to act as a more viable default case by catching\nexceptions that inherit from Exception. With both KeyboardInterrupt and\nSystemExit acting as signals that the interpreter is expected to exit,\ncatching them in the common case is the wrong semantics.\n\nNotImplementedError\n\nInherits from Exception instead of from RuntimeError.\n\nOriginally inheriting from RuntimeError, NotImplementedError does not\nhave any direct relation to the exception meant for use in user code as\na quick-and-dirty exception. Thus it now directly inherits from\nException.\n\nRequired Superclass for raise\n\nBy requiring all objects passed to a raise statement to inherit from a\nspecific superclass, all exceptions are guaranteed to have certain\nattributes. If PEP 344 is accepted, the attributes outlined there will\nbe guaranteed to be on all exceptions raised. This should help\nfacilitate debugging by making the querying of information from\nexceptions much easier.\n\nThe proposed hierarchy has BaseException as the required base class.\n\nImplementation\n\nEnforcement is straightforward. Modifying RAISE_VARARGS to do an\ninheritance check first before raising an exception should be enough.\nFor the C API, all functions that set an exception will have the same\ninheritance check applied.\n\nBare except Clauses Catch Exception\n\nIn most existing Python 2.4 code, bare except clauses are too broad in\nthe exceptions they catch. Typically only exceptions that signal an\nerror are desired to be caught. This means that exceptions that are used\nto signify that the interpreter should exit should not be caught in the\ncommon case.\n\nWith KeyboardInterrupt and SystemExit moved to inherit from\nBaseException instead of Exception, changing bare except clauses to act\nas except Exception becomes a much more reasonable default. This change\nalso will break very little code since these semantics are what most\npeople want for bare except clauses.\n\nThe complete removal of bare except clauses has been argued for. The\ncase has been made that they violate both Only One Way To Do It (OOWTDI)\nand Explicit Is Better Than Implicit (EIBTI) as listed in the\nZen of Python <20>. But Practicality Beats Purity (PBP), also in the Zen\nof Python, trumps both of these in this case. The BDFL has stated that\nbare except clauses will work this way [10].\n\nImplementation\n\nThe compiler will emit the bytecode for except Exception whenever a bare\nexcept clause is reached.\n\nTransition Plan\n\nBecause of the complexity and clutter that would be required to add all\nfeatures planned in this PEP, the transition plan is very simple. In\nPython 2.5 BaseException is added. 
In Python 3.0, all remaining features (required superclass, change in
inheritance, bare except clauses becoming the same as except Exception)
will go into effect. Making all of this work in a backwards-compatible
way in Python 2.5 would require very deep hacks in the exception
machinery which could be error-prone and lead to a slowdown in
performance for little benefit.

To help with the transition, the documentation will be changed to
reflect several programming guidelines:

- When one wants to catch all exceptions, catch BaseException
- To catch all exceptions that do not represent the termination of the
  interpreter, catch Exception explicitly
- Explicitly catch KeyboardInterrupt and SystemExit; don't rely on
  inheritance from Exception to lead to their capture
- Always catch NotImplementedError explicitly instead of relying on
  the inheritance from RuntimeError

The documentation for the 'exceptions' module[11], tutorial[12], and
PEP 290 will all require updating.

Rejected Ideas

DeprecationWarning Inheriting From PendingDeprecationWarning

This was originally proposed because a DeprecationWarning can be viewed
as a PendingDeprecationWarning that is being removed in the next
version. But since enough people thought the inheritance could
logically work the other way around, the idea was dropped.

AttributeError Inheriting From TypeError or NameError

Viewing attributes as part of the interface of a type caused the idea
of inheriting from TypeError. But that partially defeats the thinking
of duck typing and thus the idea was dropped.

Inheriting from NameError was suggested because objects can be viewed
as having their own namespace where the attributes live and when an
attribute is not found it is a namespace failure. This was also dropped
as a possibility since not everyone shared this view.

Removal of EnvironmentError

Originally proposed based on the idea that EnvironmentError was an
unneeded distinction, the BDFL overruled this idea[13].

Introduction of MacError and UnixError

Proposed to add symmetry to WindowsError, the BDFL said they won't be
used enough[14]. The idea of then removing WindowsError was proposed
and accepted as reasonable, thus completely negating the idea of adding
these exceptions.

SystemError Subclassing SystemExit

Proposed because a SystemError is meant to lead to a system exit, the
idea was removed since CriticalError indicates this better.

ControlFlowException Under Exception

It has been suggested that ControlFlowException should inherit from
Exception. This idea has been rejected based on the thinking that
control flow exceptions typically do not all need to be caught by a
single except clause.

Rename NameError to NamespaceError

NameError is considered more succinct and leaves open no possible
mistyping of the capitalization of "Namespace"[15].

Renaming RuntimeError or Introducing SimpleError

The thinking was that RuntimeError was in no way an obvious name for an
exception meant to be used when a situation did not call for the
creation of a new exception. The renaming was rejected on the basis
that the exception is already used throughout the interpreter [16].
Rejection of SimpleError was founded on the thought that people should
be free to use whatever exception they choose and not have one so
blatantly suggested[17].

Renaming Existing Exceptions

Various renamings were suggested but none garnered more than a +0 vote
(renaming ReferenceError to WeakReferenceError). The thinking was that
the existing names were fine and no one had ever actively complained
about them. To minimize backwards-compatibility issues and avoid
causing existing Python programmers extra pain, the renamings were
removed.

Have EOFError Subclass IOError

The original thought was that since EOFError deals directly with I/O,
it should subclass IOError. But since EOFError is used more as a signal
that an event has occurred (the exhaustion of an I/O port), it should
not subclass such a specific error exception.

Have MemoryError and SystemError Have a Common Superclass

Both classes deal with the interpreter, so why not have them have a
common superclass? Because one of them means that the interpreter is in
a state that it should not recover from while the other does not.

Common Superclass for PendingDeprecationWarning and DeprecationWarning

Grouping the deprecation warning exceptions together makes intuitive
sense. But this sensible idea does not extend well when one considers
how rarely either warning is used, let alone at the same time.

Removing WindowsError

Originally proposed based on the idea that having such a
platform-specific exception should not be in the built-in namespace. It
turns out, though, that enough code exists that uses the exception to
warrant it staying.

Superclass for KeyboardInterrupt and SystemExit

Proposed to make catching non-Exception inheriting exceptions easier
along with easing the transition to the new hierarchy, the idea was
rejected by the BDFL[18]. The argument that existing code did not show
enough instances of the pair of exceptions being caught and thus did
not justify cluttering the built-in namespace was used.

Acknowledgements

Thanks to Robert Brewer, Josiah Carlson, Alyssa Coghlan, Timothy
Delaney, Jack Diedrich, Fred L. Drake, Jr., Philip J. Eby, Greg Ewing,
James Y. Knight, MA Lemburg, Guido van Rossum, Stephen J.
Turnbull,\nRaymond Hettinger, and everyone else I missed for participating in the\ndiscussion.\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 End:\n\n[1] python-dev email (Bare except clauses in PEP 348)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055676.html\n\n[2] python-dev Summary (An exception is an exception, unless it doesn't\ninherit from Exception)\nhttp://www.python.org/dev/summary/2004-08-01_2004-08-15.html#an-exception-is-an-exception-unless-it-doesn-t-inherit-from-exception\n\n[3] python-dev email (PEP, take 2: Exception Reorganization for Python\n3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055116.html\n\n[4] python-dev thread (Pre-PEP: Exception Reorganization for Python 3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-July/055020.html,\nhttps://mail.python.org/pipermail/python-dev/2005-August/055065.html\n\n[5] python-dev thread (PEP, take 2: Exception Reorganization for Python\n3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055103.html\n\n[6] python-dev thread (Reorg PEP checked in)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055138.html\n\n[7] python-dev thread (Major revision of PEP 348 committed)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055199.html\n\n[8] python-dev thread (Exception Reorg PEP revised yet again)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055292.html\n\n[9] python-dev thread (PEP 348 (exception reorg) revised again)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055412.html\n\n[10] python-dev email (PEP 348 (exception reorg) revised again)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055423.html\n\n[11] exceptions module http://docs.python.org/library/exceptions.html\n\n[12] Python Tutorial http://docs.python.org/tutorial/\n\n[13] python-dev email (Pre-PEP: Exception Reorganization for Python 3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-July/055019.html\n\n[14] python-dev email (Pre-PEP: Exception Reorganization for Python 3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-July/055019.html\n\n[15] python-dev email (PEP, take 2: Exception Reorganization for Python\n3.0)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055159.html\n\n[16] python-dev email (Exception Reorg PEP checked in)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055149.html\n\n[17] python-dev email (Exception Reorg PEP checked in)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055175.html\n\n[18] python-dev email (PEP 348 (exception reorg) revised again)\nhttps://mail.python.org/pipermail/python-dev/2005-August/055423.html"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.493056"},"created":{"kind":"timestamp","value":"2005-07-28T00:00:00","string":"2005-07-28T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0348/\",\n \"authors\": [\n \"Brett Cannon\"\n ],\n \"pep_number\": \"0348\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":570,"cells":{"id":{"kind":"string","value":"3112"},"text":{"kind":"string","value":"PEP: 3112 Title: Bytes literals in Python 3000 Version: $Revision$\nLast-Modified: $Date$ Author: Jason Orendorff\n Status: Final Type: Standards Track\nContent-Type: text/x-rst Requires: 358 Created: 23-Feb-2007\nPython-Version: 
3.0 Post-History: 23-Feb-2007

Abstract

This PEP proposes a literal syntax for the bytes objects introduced in
PEP 358. The purpose is to provide a convenient way to spell ASCII
strings and arbitrary binary data.

Motivation

Existing spellings of an ASCII string in Python 3000 include:

    bytes('Hello world', 'ascii')
    'Hello world'.encode('ascii')

The proposed syntax is:

    b'Hello world'

Existing spellings of an 8-bit binary sequence in Python 3000 include:

    bytes([0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00])
    bytes('\x7fELF\x01\x01\x01\0', 'latin-1')
    '7f454c4601010100'.decode('hex')

The proposed syntax is:

    b'\x7f\x45\x4c\x46\x01\x01\x01\x00'
    b'\x7fELF\x01\x01\x01\0'

In both cases, the advantages of the new syntax are brevity, some small
efficiency gain, and the detection of encoding errors at compile time
rather than at runtime. The brevity benefit is especially felt when
using the string-like methods of bytes objects:

    lines = bdata.split(bytes('\n', 'ascii')) # existing syntax
    lines = bdata.split(b'\n') # proposed syntax

And when converting code from Python 2.x to Python 3000:

    sok.send('EXIT\r\n') # Python 2.x
    sok.send('EXIT\r\n'.encode('ascii')) # Python 3000 existing
    sok.send(b'EXIT\r\n') # proposed

Grammar Changes

The proposed syntax is an extension of the existing string syntax[1].

The new syntax for strings, including the new bytes literal, is:

    stringliteral: [stringprefix] (shortstring | longstring)
    stringprefix: "b" | "r" | "br" | "B" | "R" | "BR" | "Br" | "bR"
    shortstring: "'" shortstringitem* "'" | '"' shortstringitem* '"'
    longstring: "'''" longstringitem* "'''" | '"""' longstringitem* '"""'
    shortstringitem: shortstringchar | escapeseq
    longstringitem: longstringchar | escapeseq
    shortstringchar:
      <any source character except "\" or newline or the quote>
    longstringchar: <any source character except "\">
    escapeseq: "\" NL
     | "\\" | "\'" | '\"'
     | "\a" | "\b" | "\f" | "\n" | "\r" | "\t" | "\v"
     | "\ooo" | "\xhh"
     | "\uxxxx" | "\Uxxxxxxxx" | "\N{name}"

The following additional restrictions apply only to bytes literals
(stringliteral tokens with b or B in the stringprefix):

- Each shortstringchar or longstringchar must be a character between 1
  and 127 inclusive, regardless of any encoding declaration[2] in the
  source file.
- The Unicode-specific escape sequences \uxxxx, \Uxxxxxxxx, and
  \N{name} are unrecognized in Python 2.x and forbidden in Python
  3000.

Adjacent bytes literals are subject to the same concatenation rules as
adjacent string literals[3]. A bytes literal adjacent to a string
literal is an error.

Semantics

Each evaluation of a bytes literal produces a new bytes object. The
bytes in the new object are the bytes represented by the
shortstringitem or longstringitem parts of the literal, in the same
order.

Rationale

The proposed syntax provides a cleaner migration path from Python 2.x
to Python 3000 for most code involving 8-bit strings. Preserving the
old 8-bit meaning of a string literal is usually as simple as adding a
b prefix. The one exception is Python 2.x strings containing bytes
>127, which must be rewritten using escape sequences. Transcoding a
source file from one encoding to another, and fixing up the encoding
declaration, should preserve the meaning of the program.
Python 2.x\nnon-Unicode strings violate this principle; Python 3000 bytes literals\nshouldn't.\n\nA string literal with a b in the prefix is always a syntax error in\nPython 2.5, so this syntax can be introduced in Python 2.6, along with\nthe bytes type.\n\nA bytes literal produces a new object each time it is evaluated, like\nlist displays and unlike string literals. This is necessary because\nbytes literals, like lists and unlike strings, are mutable[4].\n\nReference Implementation\n\nThomas Wouters has checked an implementation into the Py3K branch,\nr53872.\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n[1] http://docs.python.org/reference/lexical_analysis.html#string-literals\n\n[2] http://docs.python.org/reference/lexical_analysis.html#encoding-declarations\n\n[3] http://docs.python.org/reference/lexical_analysis.html#string-literal-concatenation\n\n[4] https://mail.python.org/pipermail/python-3000/2007-February/005779.html"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.501353"},"created":{"kind":"timestamp","value":"2007-02-23T00:00:00","string":"2007-02-23T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-3112/\",\n \"authors\": [\n \"Jason Orendorff\"\n ],\n \"pep_number\": \"3112\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":571,"cells":{"id":{"kind":"string","value":"0415"},"text":{"kind":"string","value":"PEP: 415 Title: Implement context suppression with exception attributes\nVersion: $Revision$ Last-Modified: $Date$ Author: Benjamin Peterson\n BDFL-Delegate: Alyssa Coghlan Status: Final Type:\nStandards Track Content-Type: text/x-rst Created: 26-Feb-2012\nPython-Version: 3.3 Post-History: 26-Feb-2012 Replaces: 409 Resolution:\nhttps://mail.python.org/pipermail/python-dev/2012-May/119467.html\n\nAbstract\n\nPEP 409 introduced support for the raise exc from None construct to\nallow the display of the exception context to be explicitly suppressed.\nThis PEP retains the language level changes already implemented in PEP\n409, but replaces the underlying implementation mechanism with a simpler\napproach based on a new __suppress_context__ attribute on all\nBaseException instances.\n\nPEP Acceptance\n\nThis PEP was accepted by Alyssa Coghlan on the 14th of May, 2012.\n\nRationale\n\nPEP 409 changes __cause__ to be Ellipsis by default. Then if __cause__\nis set to None by raise exc from None, no context or cause will be\nprinted should the exception be uncaught.\n\nThe main problem with this scheme is it complicates the role of\n__cause__. __cause__ should indicate the cause of the exception not\nwhether __context__ should be printed or not. This use of __cause__ is\nalso not easily extended in the future. For example, we may someday want\nto allow the programmer to select which of __context__ and __cause__\nwill be printed. The PEP 409 implementation is not amenable to this.\n\nThe use of Ellipsis is a hack. Before PEP 409, Ellipsis was used\nexclusively in extended slicing. Extended slicing has nothing to do with\nexceptions, so it's not clear to someone inspecting an exception object\nwhy __cause__ should be set to Ellipsis. Using Ellipsis by default for\n__cause__ makes it asymmetrical with __context__.\n\nProposal\n\nA new attribute on BaseException, __suppress_context__, will be\nintroduced. Whenever __cause__ is set, __suppress_context__ will be set\nto True. 
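To illustrate the intended behavior, here is a small sketch (not from
the PEP; it matches the semantics that Python 3.3 shipped):

    try:
        try:
            1 / 0
        except ZeroDivisionError:
            raise ValueError("bad input") from None
    except ValueError as exc:
        print(exc.__suppress_context__)  # True: context display is suppressed
        print(exc.__context__)           # the ZeroDivisionError is still recorded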
In particular, raise exc from cause syntax will set\nexc.__suppress_context__ to True. Exception printing code will check for\nthat attribute to determine whether context and cause will be printed.\n__cause__ will return to its original purpose and values.\n\nThere is precedence for __suppress_context__ with the\nprint_line_and_file exception attribute.\n\nTo summarize, raise exc from cause will be equivalent to:\n\n exc.__cause__ = cause\n raise exc\n\nwhere exc.__cause__ = cause implicitly sets exc.__suppress_context__.\n\nPatches\n\nThere is a patch on Issue 14133.\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.508970"},"created":{"kind":"timestamp","value":"2012-02-26T00:00:00","string":"2012-02-26T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0415/\",\n \"authors\": [\n \"Benjamin Peterson\"\n ],\n \"pep_number\": \"0415\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":572,"cells":{"id":{"kind":"string","value":"0254"},"text":{"kind":"string","value":"PEP: 254 Title: Making Classes Look More Like Types Author: Guido van\nRossum Status: Rejected Type: Standards Track\nContent-Type: text/x-rst Created: 18-Jun-2001 Python-Version: 2.2\nPost-History:\n\nAbstract\n\nThis PEP has not been written yet. Watch this space!\n\nStatus\n\nThis PEP was a stub entry and eventually abandoned without having been\nfilled-out. Substantially most of the intended functionality was\nimplemented in Py2.2 with new-style types and classes.\n\nCopyright\n\nThis document has been placed in the public domain."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.511848"},"created":{"kind":"timestamp","value":"2001-06-18T00:00:00","string":"2001-06-18T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0254/\",\n \"authors\": [\n \"Guido van Rossum\"\n ],\n \"pep_number\": \"0254\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":573,"cells":{"id":{"kind":"string","value":"0607"},"text":{"kind":"string","value":"PEP: 607 Title: Reducing CPython's Feature Delivery Latency Version:\n$Revision$ Last-Modified: $Date$ Author: Łukasz Langa\n, Steve Dower , Alyssa\nCoghlan Discussions-To:\nhttps://discuss.python.org/t/pep-607-shared-background-for-the-release-cadence-peps/2528\nStatus: Final Type: Informational Content-Type: text/x-rst Created:\n11-Oct-2019 Python-Version: 3.9 Post-History: 20-Oct-2019\n\nAbstract\n\nPEP 602 and PEP 605 describe two alternative approaches to delivering\nsmaller collections of features to Python's users more frequently (as\ncompared to the current approach of offering new feature releases every\n18-24 months, with the first binary alpha release taking place 6-8\nmonths before the final release).\n\nBoth PEPs also propose moving to a release cadence that results in full\nreleases occurring at a consistent time of year (every year for PEP 602,\nevery other year for PEP 605).\n\nThis PEP (from the authors of both competing proposals) provides common\nbackground on why a change in the release cadence is considered\ndesirable, as well as the perceived risks that both PEPs attempt to\nmitigate.\n\nRationale for 
change\n\nReducing the size of feature delivery batches\n\nWhen multiple large changes are delivered together, a complex\ninvestigation may be required to determine the root cause of any new\nissues that arise. Large batch sizes also make it more likely that\nproblems will be encountered, given that they include larger pieces of\nrelatively untested code.\n\nThe easiest way to simplify those investigations and reduce the\nlikelihood of users encountering problems is to reduce the size of the\nbatches being shipped.\n\nPEP 602 proposes to address this problem via the straightforward\napproach of reducing CPython's typical batch size by 50%, shipping 12\nmonths of changes each time, rather than accumulating 18+ months of\nchanges.\n\nPEP 605 proposes to address it by regularly delivering 2 months worth of\nchanges to a subset of Python's user base that opts in to running a\nrolling stream of beta releases (similar to running Windows Insider\nbuilds instead of the Windows retail release, or running Debian testing\ninstead of Debian stable).\n\nReducing the latency of feature delivery\n\nWhen only stable releases are seeing significant user adoption, and\nthere's a long period of time between stable releases, it creates an\nincredibly strong temptation for developers to push changes into stable\nreleases before they're really ready for general use.\n\nPEP 602 proposes to address this problem by reducing the period of time\nbetween stable releases to 12 months rather than 18 months.\n\nPEP 605 proposes to address it by actively creating a community of\nPython users that regularly install and use CPython beta releases,\nproviding an incentive for core developers to start shipping changes\nearlier in the pre-release cycle, in order to obtain feedback before the\nfeature gets locked down in a stable release.\n\nAligning the release cadence with the calendar year\n\nWhile the current release cadence is nominally 18-24 months, in practice\nit has consistently been towards the 18 month end of that range. This\nmeans that the target dates for pre-releases and final releases move\naround from release to release, and the only way to remember them is to\neither look at the release PEP, or else to add those dates to your\ncalendar. This is annoying for both individual volunteers and for\ncorporate contributors, and also complicates alignment with events like\nPyCon US (typically April/May) and the now-annual core development\nsprints (typically in September).\n\nPEP 602 proposes to address this problem by publishing a new release in\nOctober every year, and basing the pre-release calendar for each year\noff that.\n\nPEP 605 proposes to address this problem by alternating between release\nyears (where a new stable release is published in August), and\nnon-release years (where only maintenance releases and new rolling beta\nreleases are published).\n\nImproving the pre-release design feedback cycle\n\nOne of the challenges of designing changes to the core interpreter and\nstandard library APIs is that the user base in a position to provide\nfeedback on nightly builds and the current pre-releases is relatively\nlimited. This means that much user feedback isn't received until after\nan API design has already shipped in a full X.Y.0 release.\n\nIf the API is a regular API, then deprecation cycles mean that it may\ntake literally years to correct any design mistakes identified at that\npoint. 
Marking APIs as provisional nominally offers a way to avoid that\nconstraint, but actually taking advantage of that freedom causes other\nproblems.\n\nPEP 602 proposes to address this problem by starting the alpha period\nimmediately after the previous stable release.\n\nPEP 605 proposes to address this problem by actively promoting adoption\nof CPython pre-releases for running production workloads (not just for\nlibrary and application compatibility testing), and adjusting the\npre-release management process as necessary to make that a reasonable\nthing to do.\n\n(Note: some standard library APIs are amenable to initially being\nshipped as part of separately versioned packages via PyPI, and only\nlater incorporated into the standard library. This section is more about\nthe lower level APIs and non-library features where that approach to\nobtaining early design feedback doesn't apply)\n\nRisks to be mitigated\n\nWhile the status quo could stand to be improved in some respects,\nPython's popularity indicates that a lot of users and other participants\nin the wider Python ecosystem are happy enough with the current release\nmanagement process.\n\nPython's user base is too large and too varied to cover all the\npotential downsides of changing our release cadence here, so instead\nthis section just covers some of the points that have been specifically\ntaken into account in the design of the PEPs.\n\nImpact on users and redistributors that already skip some releases\n\nIt is already the case that not all users and redistributors update to\nevery published CPython release series (for example, Debian stable and\nUbuntu LTS sometimes skip releases due to the mismatch between their\n24-month release cycles and CPython's typically 18-month cycle).\n\nThe faster 12-month full release cadence in PEP 602 means that users in\nthis category may end up skipping two releases where they would\npreviously have only skipped one. 
However, the extended notice period\nfor deprecations means that skipping a single release should no longer\nresult in missed deprecation warnings.\n\nThe slower 24-month full release cadence in PEP 605 may move some of the\nusers that have historically been in this category into the \"update to\nevery stable release\" category.\n\nImpact on users and redistributors that update to every release\n\nMany of Python's users never install a pre-release, but do update to\nevery stable release series at some point after it is published.\n\nPEP 602 aims to mitigate the potential negative impact on members of\nthis group by keeping the minimum gap between releases to 12 months, and\nretaining the 18 month full support period for each release.\n\nKeeping the 18-month full support period for each release branch means\nthat the branches will spend roughly the same amount of time in full\nsupport and security-fix-only mode as they do now (~18 months and ~42\nmonths, respectively).\n\nPEP 605 aims to mitigate the potential negative impact on members of\nthis group by increasing use during the pre-release period to achieve\nmore stable final releases with wider ecosystem support at launch.\n\nWith a 24-month release cadence each release branch will spend\nproportionally more time in full support mode and less time in\nsecurity-fix-only mode (~24 months and ~36 months, respectively).\n\nFull discussion of the impact on this group is left to the individual\nPEPs.\n\nImpact on users and redistributors of CPython nightly builds\n\nDespite the difficulties of doing so, there are already some users and\nredistributors that take on the challenge of using or publishing the\nCPython master branch directly.\n\nNeither PEP 602 nor PEP 605 should directly affect this group, but the\nrolling release stream proposal in PEP 605 aims to lower the barriers to\nmore users adopting this style of usage, by allowing them to adopt the\ntested rolling beta stream, rather than needing to use the master branch\ndirectly.\n\nImpact on maintainers of third party libraries\n\nFor maintainers of third party libraries, the key source of support\ncomplexity is the number of different Python versions in widespread use.\n\nPEP 602 aims to mitigate the potential negative impact on members of\nthis group by keeping the minimum gap between full releases to 12\nmonths.\n\nPEP 605 aims to mitigate the potential negative impact on members of\nthis group by increasing the gap between full releases to 24 months,\nretaining the current policy of moving each release branch to\nsecurity-fix-only mode not long after its successor is released, and\nretaining the \"beta\" naming scheme for the new rolling release stream\n(at least for the Python 3.9 release cycle).\n\nFull discussion of the impact on this group is left to the individual\nPEPs.\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 72 coding: utf-8 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.523645"},"created":{"kind":"timestamp","value":"2019-10-11T00:00:00","string":"2019-10-11T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0607/\",\n \"authors\": [\n \"Łukasz Langa\"\n ],\n \"pep_number\": \"0607\",\n \"pandoc_version\": 
\"3.5\"\n}"}}},{"rowIdx":574,"cells":{"id":{"kind":"string","value":"0310"},"text":{"kind":"string","value":"PEP: 310 Title: Reliable Acquisition/Release Pairs Version: $Revision$\nLast-Modified: $Date$ Author: Michael Hudson , Paul\nMoore Status: Rejected Type: Standards Track\nContent-Type: text/x-rst Created: 18-Dec-2002 Python-Version: 2.4\nPost-History:\n\nAbstract\n\nIt would be nice to have a less typing-intense way of writing:\n\n the_lock.acquire()\n try:\n ....\n finally:\n the_lock.release()\n\nThis PEP proposes a piece of syntax (a 'with' block) and a \"small-i\"\ninterface that generalizes the above.\n\nPronouncement\n\nThis PEP is rejected in favor of PEP 343.\n\nRationale\n\nOne of the advantages of Python's exception handling philosophy is that\nit makes it harder to do the \"wrong\" thing (e.g. failing to check the\nreturn value of some system call). Currently, this does not apply to\nresource cleanup. The current syntax for acquisition and release of a\nresource (for example, a lock) is:\n\n the_lock.acquire()\n try:\n ....\n finally:\n the_lock.release()\n\nThis syntax separates the acquisition and release by a (possibly large)\nblock of code, which makes it difficult to confirm \"at a glance\" that\nthe code manages the resource correctly. Another common error is to code\nthe \"acquire\" call within the try block, which incorrectly releases the\nlock if the acquire fails.\n\nBasic Syntax and Semantics\n\nThe syntax of a 'with' statement is as follows:\n\n 'with' [ var '=' ] expr ':'\n suite\n\nThis statement is defined as being equivalent to the following sequence\nof statements:\n\n var = expr\n\n if hasattr(var, \"__enter__\"):\n var.__enter__()\n\n try:\n suite\n\n finally:\n var.__exit__()\n\n(The presence of an __exit__ method is not checked like that of\n__enter__ to ensure that using inappropriate objects in with: statements\ngives an error).\n\nIf the variable is omitted, an unnamed object is allocated on the stack.\nIn that case, the suite has no access to the unnamed object.\n\nPossible Extensions\n\nA number of potential extensions to the basic syntax have been discussed\non the Python Developers list. None of these extensions are included in\nthe solution proposed by this PEP. In many cases, the arguments are\nnearly equally strong in both directions. In such cases, the PEP has\nalways chosen simplicity, simply because where extra power is needed,\nthe existing try block is available.\n\nMultiple expressions\n\nOne proposal was for allowing multiple expressions within one 'with'\nstatement. The __enter__ methods would be called left to right, and the\n__exit__ methods right to left. The advantage of doing so is that where\nmore than one resource is being managed, nested 'with' statements will\nresult in code drifting towards the right margin. The solution to this\nproblem is the same as for any other deep nesting - factor out some of\nthe code into a separate function. Furthermore, the question of what\nhappens if one of the __exit__ methods raises an exception (should the\nother __exit__ methods be called?) needs to be addressed.\n\nException handling\n\nAn extension to the protocol to include an optional __except__ handler,\nwhich is called when an exception is raised, and which can handle or\nre-raise the exception, has been suggested. It is not at all clear that\nthe semantics of this extension can be made precise and understandable.\nFor example, should the equivalent code be try ... except ... else if an\nexception handler is defined, and try ... 
finally if not? How can this\nbe determined at compile time, in general? The alternative is to define\nthe code as expanding to a try ... except inside a try ... finally. But\nthis may not do the right thing in real life.\n\nThe only use case identified for exception handling is with\ntransactional processing (commit on a clean finish, and rollback on an\nexception). This is probably just as easy to handle with a conventional\ntry ... except ... else block, and so the PEP does not include any\nsupport for exception handlers.\n\nImplementation Notes\n\nThere is a potential race condition in the code specified as equivalent\nto the with statement. For example, if a KeyboardInterrupt exception is\nraised between the completion of the __enter__ method call and the start\nof the try block, the __exit__ method will not be called. This can lead\nto resource leaks, or to deadlocks. [XXX Guido has stated that he cares\nabout this sort of race condition, and intends to write some C magic to\nhandle them. The implementation of the 'with' statement should copy\nthis.]\n\nOpen Issues\n\nShould existing classes (for example, file-like objects and locks) gain\nappropriate __enter__ and __exit__ methods? The obvious reason in favour\nis convenience (no adapter needed). The argument against is that if\nbuilt-in files have this but (say) StringIO does not, then code that\nuses \"with\" on a file object can't be reused with a StringIO object. So\n__exit__ = close becomes a part of the \"file-like object\" protocol,\nwhich user-defined classes may need to support.\n\nThe __enter__ hook may be unnecessary - for many use cases, an adapter\nclass is needed and in that case, the work done by the __enter__ hook\ncan just as easily be done in the __init__ hook.\n\nIf a way of controlling object lifetimes explicitly was available, the\nfunction of the __exit__ hook could be taken over by the existing\n__del__ hook. An email exchange[1] with a proponent of this approach\nleft one of the authors even more convinced that it isn't the right\nidea...\n\nIt has been suggested[2] that the \"__exit__\" method be called \"close\",\nor that a \"close\" method should be considered if no __exit__ method is\nfound, to increase the \"out-of-the-box utility\" of the \"with ...\"\nconstruct.\n\nThere are some similarities in concept between 'with ...' blocks and\ngenerators, which have led to proposals that for loops could implement\nthe with block functionality[3]. While neat on some levels, we think\nthat for loops should stick to being loops.\n\nAlternative Ideas\n\nIEXEC: Holger Krekel -- generalised approach with XML-like syntax (no\nURL found...).\n\nHolger has much more far-reaching ideas about \"execution monitors\" that\nare informed about details of control flow in the monitored block. While\ninteresting, these ideas could change the language in deep and subtle\nways and as such belong to a different PEP.\n\nAny Smalltalk/Ruby anonymous block style extension obviously subsumes\nthis one.\n\nPEP 319 is in the same area, but did not win support when aired on\npython-dev.\n\nBackwards Compatibility\n\nThis PEP proposes a new keyword, so the __future__ game will need to be\nplayed.\n\nCost of Adoption\n\nThose who claim the language is getting larger and more complicated have\nsomething else to complain about. 
It's something else to teach.\n\nFor the proposal to be useful, many file-like and lock-like classes in\nthe standard library and other code will have to have :\n\n __exit__ = close\n\nor similar added.\n\nCost of Non-Adoption\n\nWriting correct code continues to be more effort than writing incorrect\ncode.\n\nReferences\n\nThere are various python-list and python-dev discussions that could be\nmentioned here.\n\nCopyright\n\nThis document has been placed in the public domain.\n\n[1] Off-list conversation between Michael Hudson and Bill Soudan (made\npublic with permission) http://starship.python.net/crew/mwh/pep310/\n\n[2] Samuele Pedroni on python-dev\nhttps://mail.python.org/pipermail/python-dev/2003-August/037795.html\n\n[3] Thread on python-dev with subject\n[Python-Dev] pre-PEP: Resource-Release Support for Generators starting\nat https://mail.python.org/pipermail/python-dev/2003-August/037803.html"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.636347"},"created":{"kind":"timestamp","value":"2002-12-18T00:00:00","string":"2002-12-18T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0310/\",\n \"authors\": [\n \"Michael Hudson\"\n ],\n \"pep_number\": \"0310\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":575,"cells":{"id":{"kind":"string","value":"0755"},"text":{"kind":"string","value":"PEP: 755 Title: Implicit namespace policy for PyPI Author: Ofek Lev\n Sponsor: Barry Warsaw \nPEP-Delegate: Dustin Ingram Discussions-To:\nhttps://discuss.python.org/t/63191 Status: Draft Type: Process Topic:\nPackaging Created: 05-Sep-2024 Post-History: 07-Sep-2024,\n\nAbstract\n\nThis PEP codifies an implementation of PEP 752 for PyPI[1].\n\nMotivation\n\nMany projects and communities would benefit from the ability to reserve\nnamespaces. Since PyPI exists to serve the Python community, it is\ncritical to gather feedback to ensure that everyone's needs are met.\n\nA dedicated PEP is required because the operational and policy nuances\nare up to each package repository to decide.\n\nRationale\n\nPyPI has been understaffed, receiving the first dedicated specialist in\nJuly 2024. Due to lack of resources, user support has been lacking for\npackage name claims, organization requests, storage limit increases, and\neven account recovery.\n\nThe default policy of giving paid organizations more leniency when\nreserving namespaces provides the following benefits:\n\n- PyPI would have a constant source of funding for support\n specialists, infrastructure maintenance, bug fixes and new features.\n- Although each application would require independent review, less\n human feedback would be required because the process to approve a\n paid organization already bestows a certain amount of trust.\n\nTerminology\n\nPaid/Corporate Organization\n\n Corporate organizations are organizations <752#organizations> that\n pay for special functionality on PyPI. 
This PEP refers to them as\n paid in most circumstances for brevity and to ease understanding for\n non-native speakers.\n\nRoot Grant\n\n A grant as defined by PEP 752 terminology <752#terminology>.\n\nChild Grant\n\n A grant created from a root grant with the associated namespace\n being a child namespace as defined by\n PEP 752 terminology <752#terminology>.\n\nImplementation\n\nGrant Applications\n\nSubmission\n\nOnly organization (non-user) accounts have access to the grant\napplication form.\n\nApplications for paid organizations receive priority in the reviewing\nqueue. This is both to offer a meaningful benefit to paid organizations\nand to ensure that funding is available for PyPI's operational costs,\nincluding more reviewers.\n\nApproval Criteria\n\n1. The namespace must not be something common like tool or apps.\n2. The namespace should be greater than three characters.\n3. The namespace should properly and clearly identify the reservation\n owner.\n4. The organization should be actively using the namespace.\n5. There should be evidence that not reserving the namespace may cause\n ambiguity, confusion, or other harm to the community.\n\nOrganizations that are not paid organizations will represent one of the\nfollowing:\n\n- Large, popular open-source projects with many packages\n- Universities that actively publish packages\n- Government organizations that actively publish packages\n- NPOs/NGOs that actively publish packages like Our World in Data\n\nGenerally speaking, reviewers should be more tolerant of paid\norganizations that apply for grants for which they are not yet using.\n\nFor example, while it's reasonable to grant a namespace to a startup or\nan existing company with a new product line, it's not as reasonable to\ngrant a namespace to a community project that doesn't have many users.\n\nRejections\n\nRejected applications will receive clear rationale for the decision\nbased on the approval criteria. Applications rejected due to the\nnamespace being too common will be persisted internally for future\nreviewers to reference and new applications attempting to reserve a\nnamespace that was previously rejected for that reason will display a\nwarning.\n\nAcceptance\n\nWhen an application is accepted for a namespace that is used by projects\noutside of the organization, an email will be sent to the owners of the\nprojects notifying them of the new grant. The email will contain a link\nto the namespace's page.\n\nGrant Types\n\nThere are two types of grants.\n\nRoot Grant\n\nAn organization gets a root grant for every approved application. This\ngrant may produce any number of child grants.\n\nChild Grant\n\nA child grant may be created by the owner of a root grant at any time\nwithout approval. The namespace associated with such grants must be a\nchild namespace of the root grant's namespace.\n\nChild grants cannot have their own child grants.\n\nGrant Ownership\n\nThe owner of a grant may allow any number of other organizations to use\nthe grant. The grants behave as if they were owned by the organization.\nThe owner may revoke this permission at any time.\n\nThe owner may transfer ownership to another organization at any time\nwithout approval from PyPI admins. 
If the organization is a paid\norganization, the target for transfer must also be a paid organization.\nSettings for permitted organizations are transferred as well.\n\nUser Interface\n\nNamespace Page\n\nThe namespace of every active grant will have its own page that has\ninformation such as its open <752#open-namespaces> status, the current\nowners, the time at which ownership was granted and the total number of\nprojects that match the namespace.\n\nProject Page\n\nEvery project's page (example) that matches an active namespace grant\nwill indicate what the prefix is (NuGet currently does not do this) and\nwill stand out as a pill or label. This value will match the prefix key\nin the namespace detail API <752#namespace-detail>.\n\nClicking on the namespace will take the user to its page.\n\nVisual Indicators\n\nFor projects that match an active namespace grant, users will be able to\nquickly ascertain which of the following scenarios apply:\n\n1. Projects that are tied to a grant owner will not have a visual\n indicator and users should solely rely on the always-present prefix.\n2. Projects that are not tied to a grant owner and the matching grant\n is open <752#open-namespaces> will have a unique indicator that does\n not convey mistrust or danger. A good choice might be the users icon\n from Font Awesome or the groups icon from Google Fonts.\n3. Projects that are not tied to a grant owner and the matching grant\n is restricted will have a unique visual indicator. This situation\n arises when the project existed before the grant was created. The\n indicator will convey inauthenticity or lack of trust. A good choice\n might be a warning sign (⚠).\n\nOpen Namespaces\n\nWhen a child grant is created, its open <752#open-namespaces> status\nwill be inherited from the root grant. Owners of child grants may make\nthem open at any time. If a grant is open, it cannot be made restricted\nunless the owner of the grant is the owner of every project that matches\nthe namespace.\n\nGrant Removal\n\nIf a grant is shared with other organizations, the owner organization\nmust initiate a transfer as a prerequisite for organization deletion.\n\nIf a grant is not shared, the owner may unclaim the namespace in either\nof the following circumstances:\n\n- The organization manually removes themselves as the owner.\n- The organization is deleted.\n\nWhen a reserved namespace becomes unclaimed, the UI will reflect this\nsuch that matching projects will no longer have any indicators on their\npage nor will the namespace have a dedicated page.\n\nHow to Teach This\n\nFor organizations, we will document how to reserve namespaces, what the\nbenefits are and pricing.\n\nWe will document PEP 541 on the same pages so that organizations are\naware of the main mechanism to report improper uses of existing packages\nmatching their grants.\n\nRejected Ideas\n\nPage for Viewing All Active Grants\n\nThere is no page to view all active namespace grants because this has\nthe potential to leak private information such as upcoming products.\n\nVisual Indicator for Owned Projects\n\nThere is no indicator for projects that are tied to a grant owner\nprimarily to reduce clutter, especially since this is the most common\nscenario.\n\nIf there was an indicator, it would not be a check mark or similar as\nNuGet chose because it may mistakingly convey that there are associated\nsecurity guarantees inherent to the use of the package. 
Additionally,\nsome social media platforms use a check mark for verified users which\nmay cause confusion.\n\nReferences\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive.\n\n[1] The Python Package Index (https://pypi.org)"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.657319"},"created":{"kind":"timestamp","value":"2024-09-05T00:00:00","string":"2024-09-05T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0755/\",\n \"authors\": [\n \"Ofek Lev\"\n ],\n \"pep_number\": \"0755\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":576,"cells":{"id":{"kind":"string","value":"0376"},"text":{"kind":"string","value":"PEP: 376 Title: Database of Installed Python Distributions Author: Tarek\nZiadé Status: Final Type: Standards Track Topic:\nPackaging Content-Type: text/x-rst Created: 22-Feb-2009 Python-Version:\n2.7, 3.2 Post-History: 22-Jun-2009\n\npackaging:core-metadata\n\nAbstract\n\nThe goal of this PEP is to provide a standard infrastructure to manage\nproject distributions installed on a system, so all tools that are\ninstalling or removing projects are interoperable.\n\nTo achieve this goal, the PEP proposes a new format to describe\ninstalled distributions on a system. It also describes a reference\nimplementation for the standard library.\n\nIn the past an attempt was made to create an installation database (see\nPEP 262).\n\nCombined with PEP 345, the current proposal supersedes PEP 262.\n\nNote: the implementation plan didn't go as expected, so it should be\nconsidered informative only for this PEP.\n\nRationale\n\nThere are two problems right now in the way distributions are installed\nin Python:\n\n- There are too many ways to do it and this makes interoperation\n difficult.\n- There is no API to get information on installed distributions.\n\nHow distributions are installed\n\nRight now, when a distribution is installed in Python, every element can\nbe installed in a different directory.\n\nFor instance, Distutils installs the pure Python code in the purelib\ndirectory, which is lib/python2.6/site-packages for unix-like systems\nand Mac OS X, or Lib\\site-packages under Python's installation directory\nfor Windows.\n\nAdditionally, the install_egg_info subcommand of the Distutils install\ncommand adds an .egg-info file for the project into the purelib\ndirectory.\n\nFor example, for the docutils distribution, which contains one package\nan extra module and executable scripts, three elements are installed in\nsite-packages:\n\n- docutils: The docutils package.\n- roman.py: An extra module used by docutils.\n- docutils-0.5-py2.6.egg-info: A file containing the distribution\n metadata as described in PEP 314. This file corresponds to the file\n called PKG-INFO, built by the sdist command.\n\nSome executable scripts, such as rst2html.py, are also added in the bin\ndirectory of the Python installation.\n\nAnother project called setuptools[1] has two other formats to install\ndistributions, called EggFormats[2]:\n\n- a self-contained .egg directory, that contains all the distribution\n files and the distribution metadata in a file called PKG-INFO in a\n subdirectory called EGG-INFO. 
setuptools creates other files in that\n directory that can be considered as complementary metadata.\n- an .egg-info directory installed in site-packages, that contains the\n same files EGG-INFO has in the .egg format.\n\nThe first format is automatically used when you install a distribution\nthat uses the setuptools.setup function in its setup.py file, instead of\nthe distutils.core.setup one.\n\nsetuptools also add a reference to the distribution into an\neasy-install.pth file.\n\nLast, the setuptools project provides an executable script called\neasy_install[3] that installs all distributions, including\ndistutils-based ones in self-contained .egg directories.\n\nIf you want to have standalone .egg-info directories for your\ndistributions, e.g. the second setuptools format, you have to force it\nwhen you work with a setuptools-based distribution or with the\neasy_install script. You can force it by using the\n--single-version-externally-managed option or the --root option. This\nwill make the setuptools project install the project like distutils\ndoes.\n\nThis option is used by :\n\n- the pip[4] installer\n- the Fedora packagers[5].\n- the Debian packagers[6].\n\nUninstall information\n\nDistutils doesn't provide an uninstall command. If you want to uninstall\na distribution, you have to be a power user and remove the various\nelements that were installed, and then look over the .pth file to clean\nthem if necessary.\n\nAnd the process differs depending on the tools you have used to install\nthe distribution and if the distribution's setup.py uses Distutils or\nSetuptools.\n\nUnder some circumstances, you might not be able to know for sure that\nyou have removed everything, or that you didn't break another\ndistribution by removing a file that is shared among several\ndistributions.\n\nBut there's a common behavior: when you install a distribution, files\nare copied in your system. And it's possible to keep track of these\nfiles for later removal.\n\nMoreover, the Pip project has gained an uninstall feature lately. 
It\nrecords all installed files, using the record option of the install\ncommand.\n\nWhat this PEP proposes\n\nTo address those issues, this PEP proposes a few changes:\n\n- A new .dist-info structure using a directory, inspired on one format\n of the EggFormats standard from setuptools.\n- New APIs in pkgutil to be able to query the information of installed\n distributions.\n- An uninstall function and an uninstall script in Distutils.\n\nOne .dist-info directory per installed distribution\n\nThis PEP proposes an installation format inspired by one of the options\nin the EggFormats standard, the one that uses a distinct directory\nlocated in the site-packages directory.\n\nThis distinct directory is named as follows:\n\n name + '-' + version + '.dist-info'\n\nThis .dist-info directory can contain these files:\n\n- METADATA: contains metadata, as described in PEP 345, PEP 314 and\n PEP 241.\n- RECORD: records the list of installed files\n- INSTALLER: records the name of the tool used to install the project\n- REQUESTED: the presence of this file indicates that the project\n installation was explicitly requested (i.e., not installed as a\n dependency).\n\nThe METADATA, RECORD and INSTALLER files are mandatory, while REQUESTED\nmay be missing.\n\nThis proposal will not impact Python itself because the metadata files\nare not used anywhere yet in the standard library besides Distutils.\n\nIt will impact the setuptools and pip projects but, given the fact that\nthey already work with a directory that contains a PKG-INFO file, the\nchange will have no deep consequences.\n\nRECORD\n\nA RECORD file is added inside the .dist-info directory at installation\ntime when installing a source distribution using the install command.\nNotice that when installing a binary distribution created with bdist\ncommand or a bdist-based command, the RECORD file will be installed as\nwell since these commands use the install command to create binary\ndistributions.\n\nThe RECORD file holds the list of installed files. These correspond to\nthe files listed by the record option of the install command, and will\nbe generated by default. This allows the implementation of an\nuninstallation feature, as explained later in this PEP. The install\ncommand also provides an option to prevent the RECORD file from being\nwritten and this option should be used when creating system packages.\n\nThird-party installation tools also should not overwrite or delete files\nthat are not in a RECORD file without prompting or warning.\n\nThis RECORD file is inspired from PEP 262 FILES.\n\nThe RECORD file is a CSV file, composed of records, one line per\ninstalled file. 
The csv module is used to read the file, with these options:

- field delimiter: ,
- quoting char: "
- line terminator: os.linesep (so \r\n or \n)

When a distribution is installed, files can be installed under:

- the base location: path defined by the --install-lib option, which
  defaults to the site-packages directory.
- the installation prefix: path defined by the --prefix option, which
  defaults to sys.prefix.
- any other path on the system.

Each record is composed of three elements:

- the file's path

  - a '/'-separated path, relative to the base location, if the file
    is under the base location.
  - a '/'-separated path, relative to the base location, if the file
    is under the installation prefix AND if the base location is a
    subpath of the installation prefix.
  - an absolute path, using the local platform separator

- a hash of the file's contents. Notice that pyc and pyo generated
  files don't have any hash because they are automatically produced
  from py files. So checking the hash of the corresponding py file is
  enough to decide if the file and its associated pyc or pyo files
  have changed.

  The hash is either the empty string or the hash algorithm as named
  in hashlib.algorithms_guaranteed, followed by the equals character
  =, followed by the urlsafe-base64-nopad encoding of the digest
  (base64.urlsafe_b64encode(digest) with trailing = removed).

- the file's size in bytes

The csv module is used to generate this file, so the field separator is
",". Any "," character found within a field is escaped automatically by
csv.

When the file is read, the U option is used so the universal newline
support (see PEP 278) is activated, avoiding any trouble reading a file
produced on a platform that uses a different new line terminator.

Here's an example of a RECORD file (extract):

    lib/python2.6/site-packages/docutils/__init__.py,md5=nWt-Dge1eug4iAgqLS_uWg,9544
    lib/python2.6/site-packages/docutils/__init__.pyc,,
    lib/python2.6/site-packages/docutils/core.py,md5=X90C_JLIcC78PL74iuhPnA,66188
    lib/python2.6/site-packages/docutils/core.pyc,,
    lib/python2.6/site-packages/roman.py,md5=7YhfNczihNjOY0FXlupwBg,234
    lib/python2.6/site-packages/roman.pyc,,
    /usr/local/bin/rst2html.py,md5=g22D3amDLJP-FhBzCi7EvA,234
    /usr/local/bin/rst2html.pyc,,
    python2.6/site-packages/docutils-0.5.dist-info/METADATA,md5=ovJyUNzXdArGfmVyb0onyA,195
    lib/python2.6/site-packages/docutils-0.5.dist-info/RECORD,,

Notice that the RECORD file can't contain a hash of itself; it is just
mentioned here without one.

A project that installs a config.ini file in /etc/myapp will be added
like this:

    /etc/myapp/config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544

For a Windows platform, the drive letter is added for the absolute
paths, so a file that is copied in c:\MyApp\ will be:

    c:\etc\myapp\config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544
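As an illustration, here is a small sketch (not part of the PEP) of
reading a RECORD file with the csv options given above; the file path
is hypothetical, and Python 3's newline handling is used in place of
the U mode mentioned earlier:

    import csv

    # Parse a RECORD file using the dialect described above.
    with open("docutils-0.5.dist-info/RECORD", newline="") as f:
        for path, digest, size in csv.reader(f, delimiter=",", quotechar='"'):
            # pyc and pyo entries carry no hash or size; they are
            # regenerated from their .py counterparts.
            print(path, digest or "<no hash>", size or "<no size>")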
The csv module is used to generate this file, so the field separator is
",". Any "," character found within a field is escaped automatically by
csv.

When the file is read, the U option is used so that universal newline
support (see PEP 278) is activated, avoiding any trouble reading a file
produced on a platform that uses a different newline terminator.

Here's an example of a RECORD file (extract):

    lib/python2.6/site-packages/docutils/__init__.py,md5=nWt-Dge1eug4iAgqLS_uWg,9544
    lib/python2.6/site-packages/docutils/__init__.pyc,,
    lib/python2.6/site-packages/docutils/core.py,md5=X90C_JLIcC78PL74iuhPnA,66188
    lib/python2.6/site-packages/docutils/core.pyc,,
    lib/python2.6/site-packages/roman.py,md5=7YhfNczihNjOY0FXlupwBg,234
    lib/python2.6/site-packages/roman.pyc,,
    /usr/local/bin/rst2html.py,md5=g22D3amDLJP-FhBzCi7EvA,234
    /usr/local/bin/rst2html.pyc,,
    python2.6/site-packages/docutils-0.5.dist-info/METADATA,md5=ovJyUNzXdArGfmVyb0onyA,195
    lib/python2.6/site-packages/docutils-0.5.dist-info/RECORD,,

Notice that the RECORD file can't contain a hash of itself; it is just
mentioned here without one.

A project that installs a config.ini file in /etc/myapp will be added
like this:

    /etc/myapp/config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544

For a Windows platform, the drive letter is added for the absolute
paths, so a file that is copied in c:\MyApp will be:

    c:\etc\myapp\config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544

INSTALLER

The install command has a new option called installer. This option is
the name of the tool used to invoke the installation. It's a normalized
lower-case string matching [a-z0-9_\-\.].

    $ python setup.py install --installer=pkg-system

It defaults to distutils if not provided.

When a distribution is installed, the INSTALLER file is generated in the
.dist-info directory with this value, to keep track of who installed the
distribution. The file is a single-line text file.

REQUESTED

Some install tools automatically detect unfulfilled dependencies and
install them. In these cases, it is useful to track which distributions
were installed purely as a dependency, so if their dependent
distribution is later uninstalled, the user can be alerted of the
orphaned dependency.

If a distribution is installed by direct user request (the usual case),
a file REQUESTED is added to the .dist-info directory of the installed
distribution. The REQUESTED file may be empty, or may contain a marker
comment line beginning with the "#" character.

If an install tool installs a distribution automatically, as a
dependency of another distribution, the REQUESTED file should not be
created.

The install command of distutils by default creates the REQUESTED file.
It accepts --requested and --no-requested options to explicitly specify
whether the file is created.

If a distribution that was already installed on the system as a
dependency is later installed by name, the distutils install command
will create the REQUESTED file in the .dist-info directory of the
existing installation.

Implementation details

Note: this section is non-normative. In the end, this PEP was
implemented by third-party libraries and tools, not the standard
library.

New functions and classes in pkgutil

To use the .dist-info directory content, we need to add a set of APIs
to the standard library. The best place to put these APIs is pkgutil.

Functions

The new functions added in the pkgutil module are:

- distinfo_dirname(name, version) -> directory name

  name is converted to a standard distribution name by replacing any
  runs of non-alphanumeric characters with a single '-'.

  version is converted to a standard version string. Spaces become
  dots, and all other non-alphanumeric characters (except dots)
  become dashes, with runs of multiple dashes condensed to a single
  dash.

  Both attributes are then converted into their filename-escaped
  form, i.e. any '-' characters are replaced with '_' other than the
  one in 'dist-info' and the one separating the name from the
  version number. (A sketch implementing these rules appears after
  this list.)

- get_distributions() -> iterator of Distribution instances.

  Provides an iterator that looks for .dist-info directories in
  sys.path and returns Distribution instances for each one of them.

- get_distribution(name) -> Distribution or None.

  Scans all elements in sys.path and looks for all directories ending
  with .dist-info. Returns a Distribution corresponding to the
  .dist-info directory that contains a METADATA file that matches
  name for the name metadata.

  This function only returns the first result found, since no more
  than one value is expected. If the directory is not found, returns
  None.

- obsoletes_distribution(name, version=None) -> iterator of
  Distribution instances.

  Iterates over all distributions to find which distributions obsolete
  name. If a version is provided, it will be used to filter the
  results.

- provides_distribution(name, version=None) -> iterator of
  Distribution instances.

  Iterates over all distributions to find which distributions provide
  name. If a version is provided, it will be used to filter the
  results.

- get_file_users(path) -> iterator of Distribution instances.

  Iterates over all distributions to find out which distributions use
  path. path can be a local absolute path or a relative '/'-separated
  path.

  A local absolute path is an absolute path in which occurrences of
  '/' have been replaced by the system separator given by os.sep.
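For illustration, the distinfo_dirname() conversion rules could be
implemented along the following lines. This is a sketch written against
the rules above, not the reference implementation:

    import re

    def distinfo_dirname(name, version):
        # Runs of non-alphanumeric characters in the name become '-'.
        name = re.sub('[^A-Za-z0-9]+', '-', name)
        # Spaces become dots; other non-alphanumerics (except dots)
        # become dashes, with runs of dashes condensed.
        version = re.sub('[^A-Za-z0-9.]+', '-', version.replace(' ', '.'))
        # Filename escaping: remaining '-' characters become '_'.
        return '%s-%s.dist-info' % (name.replace('-', '_'),
                                    version.replace('-', '_'))

For example, distinfo_dirname('python-ldap', '2.5 a---5') returns
'python_ldap-2.5.a_5.dist-info', matching the examples shown later in
this PEP.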
Distribution class

A new class called Distribution is created with the path of the
.dist-info directory provided to the constructor. It reads the metadata
contained in METADATA when it is instantiated.

Distribution(path) -> instance

  Creates a Distribution instance for the given path.

Distribution provides the following attributes:

- name: The name of the distribution.
- metadata: A DistributionMetadata instance loaded with the
  distribution's METADATA file.
- requested: A boolean that indicates whether the REQUESTED metadata
  file is present (in other words, whether the distribution was
  installed by user request).

And the following methods:

- get_installed_files(local=False) -> iterator of (path, hash, size)

  Iterates over the RECORD entries and returns a tuple
  (path, hash, size) for each line. If local is True, the path is
  transformed into a local absolute path. Otherwise the raw value from
  RECORD is returned. (A sketch of this method and of uses() follows
  this list.)

  A local absolute path is an absolute path in which occurrences of
  '/' have been replaced by the system separator given by os.sep.

- uses(path) -> Boolean

  Returns True if path is listed in RECORD. path can be a local
  absolute path or a relative '/'-separated path.

- get_distinfo_file(path, binary=False) -> file object

  Returns a file located under the .dist-info directory.

  Returns a file instance for the file pointed to by path.

  path has to be a '/'-separated path relative to the .dist-info
  directory or an absolute path.

  If path is an absolute path and doesn't start with the .dist-info
  directory path, a DistutilsError is raised.

  If binary is True, opens the file in read-only binary mode (rb),
  otherwise opens it in read-only mode (r).

- get_distinfo_files(local=False) -> iterator of paths

  Iterates over the RECORD entries and returns paths for each line if
  the path is pointing to a file located in the .dist-info directory
  or one of its subdirectories.

  If local is True, each path is transformed into a local absolute
  path. Otherwise the raw value from RECORD is returned.
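To make the relationship between these methods and the RECORD format
concrete, here is a minimal sketch of how the two simplest methods
could be built on the CSV conventions described earlier. This is
illustrative only, not the prototype's code:

    import csv
    import os

    class Distribution(object):
        def __init__(self, path):
            self.path = path   # the .dist-info directory

        def get_installed_files(self, local=False):
            with open(os.path.join(self.path, 'RECORD')) as stream:
                for path, checksum, size in csv.reader(
                        stream, delimiter=',', quotechar='"'):
                    if local:
                        path = path.replace('/', os.sep)
                    yield path, checksum, size

        def uses(self, path):
            return any(installed == path
                       for installed, _, _ in self.get_installed_files())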
Notice that the API is organized in five classes that work with
directories and Zip files (so it works with files included in Zip
files, see PEP 273 for more details). These classes are described in
the documentation of the prototype implementation for interested
readers[7].

Examples

Let's use some of the new APIs with our docutils example:

    >>> from pkgutil import get_distribution, get_file_users, distinfo_dirname
    >>> dist = get_distribution('docutils')
    >>> dist.name
    'docutils'
    >>> dist.metadata.version
    '0.5'

    >>> distinfo_dirname('docutils', '0.5')
    'docutils-0.5.dist-info'

    >>> distinfo_dirname('python-ldap', '2.5')
    'python_ldap-2.5.dist-info'

    >>> distinfo_dirname('python-ldap', '2.5 a---5')
    'python_ldap-2.5.a_5.dist-info'

    >>> for path, hash, size in dist.get_installed_files():
    ...     print '%s,%s,%s' % (path, hash, size)
    ...
    python2.6/site-packages/docutils/__init__.py,b690274f621402dda63bf11ba5373bf2,9544
    python2.6/site-packages/docutils/core.py,9c4b84aff68aa55f2e9bf70481b94333,66188
    python2.6/site-packages/roman.py,a4b84aff68aa55f2e9bf70481b943D3,234
    /usr/local/bin/rst2html.py,a4b84aff68aa55f2e9bf70481b943D3,234
    python2.6/site-packages/docutils-0.5.dist-info/METADATA,6fe57de576d749536082d8e205b77748,195
    python2.6/site-packages/docutils-0.5.dist-info/RECORD,,

    >>> dist.uses('docutils/core.py')
    True

    >>> dist.uses('/usr/local/bin/rst2html.py')
    True

    >>> dist.get_distinfo_file('METADATA')
    <open file ...>

    >>> dist.requested
    True

New functions in Distutils

Distutils already provides a very basic way to install a distribution,
which is running the install command over the setup.py script of the
distribution.

Distutils2 will provide a very basic uninstall function, added in
distutils2.util, that takes the name of the distribution to uninstall
as its argument. uninstall uses the APIs described earlier and removes
all unique files, as long as their hash didn't change. Then it removes
empty directories left behind.

uninstall returns a list of uninstalled files:

    >>> from distutils2.util import uninstall
    >>> uninstall('docutils')
    ['/opt/local/lib/python2.6/site-packages/docutils/core.py',
    ...
    '/opt/local/lib/python2.6/site-packages/docutils/__init__.py']

If the distribution is not found, a DistutilsUninstallError is raised.

Filtering

To make it a reference API for third-party projects that wish to control
how uninstall works, a second callable argument can be used. It's called
for each file that is removed. If the callable returns True, the file is
removed. If it returns False, it's left alone.

Examples:

    >>> def _remove_and_log(path):
    ...     logging.info('Removing %s' % path)
    ...     return True
    ...
    >>> uninstall('docutils', _remove_and_log)

    >>> def _dry_run(path):
    ...     logging.info('Removing %s (dry run)' % path)
    ...     return False
    ...
    >>> uninstall('docutils', _dry_run)

Of course, a third-party tool can use lower-level pkgutil APIs to
implement its own uninstall feature.

Installer marker

As explained earlier in this PEP, the install command adds an INSTALLER
file in the .dist-info directory with the name of the installer.

To avoid removing distributions that were installed by another packaging
system, the uninstall function takes an extra argument installer, which
defaults to distutils2.

When called, uninstall checks that the INSTALLER file matches this
argument. If not, it raises a DistutilsUninstallError:

    >>> uninstall('docutils')
    Traceback (most recent call last):
    ...
    DistutilsUninstallError: docutils was installed by 'cool-pkg-manager'

    >>> uninstall('docutils', installer='cool-pkg-manager')

This allows a third-party application to use the uninstall function and
strongly suggest that no other program remove a distribution it has
previously installed. This is useful when a third-party program that
relies on Distutils APIs performs extra steps on the system at
installation time that it has to undo at uninstallation time.

Adding an Uninstall script

An uninstall script is added in Distutils2 and is used like this:

    $ python -m distutils2.uninstall projectname

Notice that the script doesn't check whether the removal of a
distribution breaks another distribution.
It does, however, make sure that all the files it removes are not used
by any other distribution, by using the uninstall function.

Also note that this uninstall script pays no attention to the REQUESTED
metadata; that is provided only for use by external tools to provide
more advanced dependency management.

Backward compatibility and roadmap

These changes don't introduce any compatibility problems, since they
will be implemented in:

- pkgutil, in new functions
- distutils2

The plan is to include the functionality outlined in this PEP in pkgutil
for Python 3.2, and in Distutils2.

Distutils2 will also contain a backport of the new pkgutil, and can be
used for Python 2.4 onward.

Distributions installed using existing, pre-standardization formats do
not have the necessary metadata available for the new API, and thus will
be ignored. Third-party tools may of course continue to support previous
formats in addition to the new format, in order to ease the transition.

References

[1] http://peak.telecommunity.com/DevCenter/setuptools

[2] http://peak.telecommunity.com/DevCenter/EggFormats

[3] http://peak.telecommunity.com/DevCenter/EasyInstall

[4] http://pypi.python.org/pypi/pip

[5] http://fedoraproject.org/wiki/Packaging/Python/Eggs#Providing_Eggs_using_Setuptools

[6] http://wiki.debian.org/DebianPython/NewPolicy

[7] http://bitbucket.org/tarek/pep376/

Acknowledgements

Jim Fulton, Ian Bicking, Phillip Eby, Rafael Villar Burke, and many
people at Pycon and Distutils-SIG.

Copyright

This document has been placed in the public domain.


PEP: 286
Title: Enhanced Argument Tuples
Author: Martin von Löwis
Status: Deferred
Type: Standards Track
Content-Type: text/x-rst
Created: 03-Mar-2002
Python-Version: 2.3
Post-History:

Abstract

PyArg_ParseTuple is confronted with difficult memory management if an
argument converter creates new memory. To deal with these cases, a
specialized argument type is proposed.

PEP Deferral

Further exploration of the concepts covered in this PEP has been
deferred for lack of a current champion interested in promoting the
goals of the PEP and collecting and incorporating feedback, and with
sufficient available time to do so effectively.

The resolution of this PEP may also be affected by the resolution of PEP
426, which proposes the use of a preprocessing step to generate some
aspects of C API interface code.

Problem description

Today, argument tuples keep references to the function arguments, which
are guaranteed to live as long as the argument tuple exists, which is at
least as long as the function call is being executed.

In some cases, parsing an argument will allocate new memory, which is
then to be released by the caller. This has two problems:
1. In case of failure, the application cannot know what memory to
   release; most callers don't even know that they have the
   responsibility to release that memory. Examples of this are the N
   converter (bug #416288[1]) and the es# converter (bug #501716[2]).
2. Even for successful argument parsing, it is still inconvenient for
   the caller to be responsible for releasing the memory. In some
   cases, this is unnecessarily inefficient. For example, the es
   converter copies the conversion result into memory, even though
   there already is a string object that has the right contents.

Proposed solution

A new type 'argument tuple' is introduced. This type derives from tuple,
adding a __dict__ member (at tp_dictoffset -4). Instances of this type
might get the following attributes:

- 'failobjects', a list of objects which need to be deallocated in
  case of failure
- 'okobjects', a list of objects which will be released when the
  argument tuple is released

To manage this type, the following functions will be added, and used
appropriately in ceval.c and getargs.c:

- PyArgTuple_New(int);
- PyArgTuple_AddFailObject(PyObject*, PyObject*);
- PyArgTuple_AddFailMemory(PyObject*, void*);
- PyArgTuple_AddOkObject(PyObject*, PyObject*);
- PyArgTuple_AddOkMemory(PyObject*, void*);
- PyArgTuple_ClearFailed(PyObject*);

When argument parsing fails, all fail objects will be released through
Py_DECREF, and all fail memory will be released through PyMem_Free. If
parsing succeeds, the references to the fail objects and fail memory are
dropped, without releasing anything.

When the argument tuple is released, all ok objects and memory will be
released.

If those functions are called with an object of a different type, a
warning is issued and no further action is taken; usage of the affected
converters without using argument tuples is deprecated.

Affected converters

The following converters will add fail memory and fail objects: N, es,
et, es#, et# (unless memory is passed into the converter).

New converters

To simplify Unicode conversion, the e* converters are duplicated as E*
converters (Es, Et, Es#, Et#). The usage of the E* converters is
identical to that of the e* converters, except that the application will
not need to manage the resulting memory. This will be implemented
through registration of Ok objects with the argument tuple. The e*
converters are deprecated.

References

[1] infrequent memory leak in pyexpat
(http://bugs.python.org/issue416288)

[2] "es#" parser marker leaks memory
(http://bugs.python.org/issue501716)

Copyright

This document has been placed in the public domain.


PEP: 402
Title: Simplified Package Layout and Partitioning
Author: Phillip J. Eby
Status: Rejected
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 12-Jul-2011
Python-Version: 3.3
Post-History: 20-Jul-2011
Replaces: 382
Rejection Notice

On the first day of sprints at US PyCon 2012 we had a long and fruitful
discussion about PEP 382 and PEP 402. We ended up rejecting both, but a
new PEP will be written to carry on in the spirit of PEP 402. Martin von
Löwis wrote up a summary[1].

Abstract

This PEP proposes an enhancement to Python's package importing to:

- Surprise users of other languages less,
- Make it easier to convert a module into a package, and
- Support dividing packages into separately installed components (a la
  "namespace packages", as described in PEP 382)

The proposed enhancements do not change the semantics of any
currently-importable directory layouts, but make it possible for
packages to use a simplified directory layout (that is not importable
currently).

However, the proposed changes do NOT add any performance overhead to the
importing of existing modules or packages, and performance for the new
directory layout should be about the same as that of previous "namespace
package" solutions (such as pkgutil.extend_path()).

The Problem

    "Most packages are like modules. Their contents are highly
    interdependent and can't be pulled apart. [However,] some packages
    exist to provide a separate namespace. ... It should be possible to
    distribute sub-packages or submodules of these [namespace packages]
    independently."

    -- Jim Fulton, shortly before the release of Python 2.3[2]

When new users come to Python from other languages, they are often
confused by Python's package import semantics. At Google, for example,
Guido received complaints from "a large crowd with pitchforks"[3] that
the requirement for packages to contain an __init__ module was a
"misfeature", and should be dropped.

In addition, users coming from languages like Java or Perl are sometimes
confused by a difference in Python's import path searching.

In most other languages that have a similar path mechanism to Python's
sys.path, a package is merely a namespace that contains modules or
classes, and can thus be spread across multiple directories in the
language's path. In Perl, for instance, a Foo::Bar module will be
searched for in Foo/ subdirectories all along the module include path,
not just in the first such subdirectory found.

Worse, this is not just a problem for new users: it prevents anyone from
easily splitting a package into separately-installable components. In
Perl terms, it would be as if every possible Net:: module on CPAN had to
be bundled up and shipped in a single tarball!

For that reason, various workarounds for this latter limitation exist,
circulated under the term "namespace packages".
The Python standard library has provided one such workaround since
Python 2.3 (via the pkgutil.extend_path() function), and the
"setuptools" package provides another (via
pkg_resources.declare_namespace()).
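For reference, the pkgutil.extend_path() workaround requires
boilerplate like the following in the __init__ module of every
separately-distributed portion of the namespace package:

    # __init__.py of each portion of the namespace package
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)

pkg_resources.declare_namespace(__name__) plays the equivalent role for
setuptools-based distributions.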
The workarounds themselves, however, fall prey to a third issue with
Python's way of laying out packages in the filesystem.

Because a package must contain an __init__ module, any attempt to
distribute modules for that package must necessarily include that
__init__ module, if those modules are to be importable.

However, the very fact that each distribution of modules for a package
must contain this (duplicated) __init__ module, means that OS vendors
who package up these module distributions must somehow handle the
conflict caused by several module distributions installing that __init__
module to the same location in the filesystem.

This led to the proposal of PEP 382 ("Namespace Packages") - a way to
signal to Python's import machinery that a directory was importable,
using unique filenames per module distribution.

However, there was more than one downside to this approach. Performance
for all import operations would be affected, and the process of
designating a package became even more complex. New terminology had to
be invented to explain the solution, and so on.

As terminology discussions continued on the Import-SIG, it soon became
apparent that the main reason it was so difficult to explain the
concepts related to "namespace packages" was because Python's current
way of handling packages is somewhat underpowered, when compared to
other languages.

That is, in other popular languages with package systems, no special
term is needed to describe "namespace packages", because all packages
generally behave in the desired fashion.

Rather than being an isolated single directory with a special marker
module (as in Python), packages in other languages are typically just
the union of appropriately-named directories across the entire import or
inclusion path.

In Perl, for example, the module Foo is always found in a Foo.pm file,
and a module Foo::Bar is always found in a Foo/Bar.pm file. (In other
words, there is One Obvious Way to find the location of a particular
module.)

This is because Perl considers a module to be different from a package:
the package is purely a namespace in which other modules may reside, and
is only coincidentally the name of a module as well.

In current versions of Python, however, the module and the package are
more tightly bound together. Foo is always a module -- whether it is
found in Foo.py or Foo/__init__.py -- and it is tightly linked to its
submodules (if any), which must reside in the exact same directory where
the __init__.py was found.

On the positive side, this design choice means that a package is quite
self-contained, and can be installed, copied, etc. as a unit just by
performing an operation on the package's root directory.

On the negative side, however, it is non-intuitive for beginners, and
requires a more complex step to turn a module into a package. If Foo
begins its life as Foo.py, then it must be moved and renamed to
Foo/__init__.py.

Conversely, if you intend to create a Foo.Bar module from the start, but
have no particular module contents to put in Foo itself, then you have
to create an empty and seemingly-irrelevant Foo/__init__.py file, just
so that Foo.Bar can be imported.

(And these issues don't just confuse newcomers to the language, either:
they annoy many experienced developers as well.)

So, after some discussion on the Import-SIG, this PEP was created as an
alternative to PEP 382, in an attempt to solve all of the above
problems, not just the "namespace package" use cases.

And, as a delightful side effect, the solution proposed in this PEP does
not affect the import performance of ordinary modules or self-contained
(i.e. __init__-based) packages.

The Solution

In the past, various proposals have been made to allow more intuitive
approaches to package directory layout. However, most of them failed
because of an apparent backward-compatibility problem.

That is, if the requirement for an __init__ module were simply dropped,
it would open up the possibility for a directory named, say, string on
sys.path, to block importing of the standard library string module.

Paradoxically, however, the failure of this approach does not arise from
the elimination of the __init__ requirement!

Rather, the failure arises because the underlying approach takes for
granted that a package is just ONE thing, instead of two.

In truth, a package comprises two separate, but related entities: a
module (with its own, optional contents), and a namespace where other
modules or packages can be found.

In current versions of Python, however, the module part (found in
__init__) and the namespace for submodule imports (represented by the
__path__ attribute) are both initialized at the same time, when the
package is first imported.

And, if you assume this is the only way to initialize these two things,
then there is no way to drop the need for an __init__ module, while
still being backwards-compatible with existing directory layouts.

After all, as soon as you encounter a directory on sys.path matching the
desired name, that means you've "found" the package, and must stop
searching, right?

Well, not quite.

A Thought Experiment

Let's hop into the time machine for a moment, and pretend we're back in
the early 1990s, shortly before Python packages and __init__.py have
been invented. But, imagine that we are familiar with Perl-like package
imports, and we want to implement a similar system in Python.

We'd still have Python's module imports to build on, so we could
certainly conceive of having Foo.py as a parent Foo module for a Foo
package. But how would we implement submodule and subpackage imports?

Well, if we didn't have the idea of __path__ attributes yet, we'd
probably just search sys.path looking for Foo/Bar.py.

But we'd only do it when someone actually tried to import Foo.Bar.

NOT when they imported Foo.

And that lets us get rid of the backwards-compatibility problem of
dropping the __init__ requirement, back here in 2011.

How?

Well, when we import Foo, we're not even looking for Foo/ directories on
sys.path, because we don't care yet.
The only point at which we care is the point when somebody tries to
actually import a submodule or subpackage of Foo.

That means that if Foo is a standard library module (for example), and I
happen to have a Foo directory on sys.path (without an __init__.py, of
course), then nothing breaks. The Foo module is still just a module, and
it's still imported normally.

Self-Contained vs. "Virtual" Packages

Of course, in today's Python, trying to import Foo.Bar will fail if Foo
is just a Foo.py module (and thus lacks a __path__ attribute).

So, this PEP proposes to dynamically create a __path__, in the case
where one is missing.

That is, if I try to import Foo.Bar the proposed change to the import
machinery will notice that the Foo module lacks a __path__, and will
therefore try to build one before proceeding.

And it will do this by making a list of all the existing Foo/
subdirectories of the directories listed in sys.path.

If the list is empty, the import will fail with ImportError, just like
today. But if the list is not empty, then it is saved in a new
Foo.__path__ attribute, making the module a "virtual package".
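In deliberately simplified form (handling only real filesystem
directories, and ignoring zipfiles and the other PEP 302 importers that
the Specification below accounts for), that list could be built like
this:

    import os
    import sys

    def build_virtual_path(name):
        # Collect every existing <name>/ subdirectory of a sys.path entry.
        return [os.path.join(entry, name)
                for entry in sys.path
                if os.path.isdir(os.path.join(entry, name))]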
That is, because it now has a valid __path__, we can proceed to import
submodules or subpackages in the normal way.

Now, notice that this change does not affect "classic", self-contained
packages that have an __init__ module in them. Such packages already
have a __path__ attribute (initialized at import time) so the import
machinery won't try to create another one later.

This means that (for example) the standard library email package will
not be affected in any way by you having a bunch of unrelated
directories named email on sys.path. (Even if they contain *.py files.)

But it does mean that if you want to turn your Foo module into a Foo
package, all you have to do is add a Foo/ directory somewhere on
sys.path, and start adding modules to it.

But what if you only want a "namespace package"? That is, a package that
is only a namespace for various separately-distributed submodules and
subpackages?

For example, if you're Zope Corporation, distributing dozens of separate
tools like zc.buildout, each in packages under the zc namespace, you
don't want to have to make and include an empty zc.py in every tool you
ship. (And, if you're a Linux or other OS vendor, you don't want to deal
with the package installation conflicts created by trying to install ten
copies of zc.py to the same location!)

No problem. All we have to do is make one more minor tweak to the import
process: if the "classic" import process fails to find a self-contained
module or package (e.g., if import zc fails to find a zc.py or
zc/__init__.py), then we once more try to build a __path__ by searching
for all the zc/ directories on sys.path, and putting them in a list.

If this list is empty, we raise ImportError. But if it's non-empty, we
create an empty zc module, and put the list in zc.__path__.
Congratulations: zc is now a namespace-only, "pure virtual" package! It
has no module contents, but you can still import submodules and
subpackages from it, regardless of where they're located on sys.path.

(By the way, both of these additions to the import protocol (i.e. the
dynamically-added __path__, and dynamically-created modules) apply
recursively to child packages, using the parent package's __path__ in
place of sys.path as a basis for generating a child __path__. This means
that self-contained and virtual packages can contain each other without
limitation, with the caveat that if you put a virtual package inside a
self-contained one, it's gonna have a really short __path__!)

Backwards Compatibility and Performance

Notice that these two changes only affect import operations that today
would result in ImportError. As a result, the performance of imports
that do not involve virtual packages is unaffected, and potential
backward compatibility issues are very restricted.

Today, if you try to import submodules or subpackages from a module with
no __path__, it's an immediate error. And of course, if you don't have a
zc.py or zc/__init__.py somewhere on sys.path today, import zc would
likewise fail.

Thus, the only potential backwards-compatibility issues are:

1. Tools that expect package directories to have an __init__ module,
   that expect directories without an __init__ module to be
   unimportable, or that expect __path__ attributes to be static, will
   not recognize virtual packages as packages.

   (In practice, this just means that tools will need updating to
   support virtual packages, e.g. by using pkgutil.walk_modules()
   instead of using hardcoded filesystem searches.)

2. Code that expects certain imports to fail may now do something
   unexpected. This should be fairly rare in practice, as most sane,
   non-test code does not import things that are expected not to exist!

The biggest likely exception to the above would be when a piece of code
tries to check whether some package is installed by importing it. If
this is done only by importing a top-level module (i.e., not checking
for a __version__ or some other attribute), and there is a directory of
the same name as the sought-for package on sys.path somewhere, and the
package is not actually installed, then such code could be fooled into
thinking a package is installed that really isn't.

For example, suppose someone writes a script (datagen.py) containing the
following code:

    try:
        import json
    except ImportError:
        import simplejson as json

And runs it in a directory laid out like this:

    datagen.py
    json/
        foo.js
        bar.js

If import json succeeded due to the mere presence of the json/
subdirectory, the code would incorrectly believe that the json module
was available, and proceed to fail with an error.

However, we can prevent corner cases like these from arising, simply by
making one small change to the algorithm presented so far. Instead of
allowing you to import a "pure virtual" package (like zc), we allow only
importing of the contents of virtual packages.

That is, a statement like import zc should raise ImportError if there is
no zc.py or zc/__init__.py on sys.path. But, doing import zc.buildout
should still succeed, as long as there's a zc/buildout.py or
zc/buildout/__init__.py on sys.path.

In other words, we don't allow pure virtual packages to be imported
directly, only modules and self-contained packages. (This is an
acceptable limitation, because there is no functional value to importing
such a package by itself. After all, the module object will have no
contents until you import at least one of its subpackages or
submodules!)

Once zc.buildout has been successfully imported, though, there will be a
zc module in sys.modules, and trying to import it will of course
succeed.
We are only preventing an initial import from succeeding, in order to
prevent false-positive import successes when clashing subdirectories are
present on sys.path.

So, with this slight change, the datagen.py example above will work
correctly. When it does import json, the mere presence of a json/
directory will simply not affect the import process at all, even if it
contains .py files. The json/ directory will still only be searched in
the case where an import like import json.converter is attempted.

Meanwhile, tools that expect to locate packages and modules by walking a
directory tree can be updated to use the existing pkgutil.walk_modules()
API, and tools that need to inspect packages in memory should use the
other APIs described in the Standard Library Changes/Additions section
below.

Specification

A change is made to the existing import process, when importing names
containing at least one . -- that is, imports of modules that have a
parent package.

Specifically, if the parent package does not exist, or exists but lacks
a __path__ attribute, an attempt is first made to create a "virtual
path" for the parent package (following the algorithm described in the
section on virtual paths, below).

If the computed "virtual path" is empty, an ImportError results, just as
it would today. However, if a non-empty virtual path is obtained, the
normal import of the submodule or subpackage proceeds, using that
virtual path to find the submodule or subpackage. (Just as it would have
with the parent's __path__, if the parent package had existed and had a
__path__.)

When a submodule or subpackage is found (but not yet loaded), the parent
package is created and added to sys.modules (if it didn't exist before),
and its __path__ is set to the computed virtual path (if it wasn't
already set).

In this way, when the actual loading of the submodule or subpackage
occurs, it will see a parent package existing, and any relative imports
will work correctly. However, if no submodule or subpackage exists, then
the parent package will not be created, nor will a standalone module be
converted into a package (by the addition of a spurious __path__
attribute).

Note, by the way, that this change must be applied recursively: that is,
if foo and foo.bar are pure virtual packages, then import foo.bar.baz
must wait until foo.bar.baz is found before creating module objects for
both foo and foo.bar, and then create both of them together, properly
setting the foo module's .bar attribute to point to the foo.bar module.

In this way, pure virtual packages are never directly importable: an
import foo or import foo.bar by itself will fail, and the corresponding
modules will not appear in sys.modules until they are needed to point to
a successfully imported submodule or self-contained subpackage.

Virtual Paths

A virtual path is created by obtaining a PEP 302 "importer" object for
each of the path entries found in sys.path (for a top-level module) or
the parent __path__ (for a submodule).

(Note: because sys.meta_path importers are not associated with sys.path
or __path__ entry strings, such importers do not participate in this
process.)

Each importer is checked for a get_subpath() method, and if present, the
method is called with the full name of the module/package the path is
being constructed for. The return value is either a string representing
a subdirectory for the requested package, or None if no such
subdirectory exists.

The strings returned by the importers are added to the path list being
built, in the same order as they are found. (None values and missing
get_subpath() methods are simply skipped.)
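For a plain filesystem directory, a conforming get_subpath()
implementation could look something like the following hypothetical
sketch (FileImporter is an illustrative name, not an existing class):

    import os

    class FileImporter(object):
        """Hypothetical importer bound to one path entry."""

        def __init__(self, path_entry):
            self.path_entry = path_entry

        def get_subpath(self, fullname):
            # Only the trailing name component matters, since the
            # importer is already bound to the parent's path entry.
            subdir = os.path.join(self.path_entry,
                                  fullname.rpartition('.')[2])
            if os.path.isdir(subdir):
                return subdir
            return None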
The resulting list (whether empty or not) is then stored in a
sys.virtual_package_paths dictionary, keyed by module name.

This dictionary has two purposes. First, it serves as a cache, in the
event that more than one attempt is made to import a submodule of a
virtual package.

Second, and more importantly, the dictionary can be used by code that
extends sys.path at runtime to update imported packages' __path__
attributes accordingly. (See Standard Library Changes/Additions below
for more details.)

In Python code, the virtual path construction algorithm would look
something like this:

    def get_virtual_path(modulename, parent_path=None):

        if modulename in sys.virtual_package_paths:
            return sys.virtual_package_paths[modulename]

        if parent_path is None:
            parent_path = sys.path

        path = []

        for entry in parent_path:
            # Obtain a PEP 302 importer object - see pkgutil module
            importer = pkgutil.get_importer(entry)

            if hasattr(importer, 'get_subpath'):
                subpath = importer.get_subpath(modulename)
                if subpath is not None:
                    path.append(subpath)

        sys.virtual_package_paths[modulename] = path
        return path

And a function like this one should be exposed in the standard library
as e.g. imp.get_virtual_path(), so that people creating __import__
replacements or sys.meta_path hooks can reuse it.

Standard Library Changes/Additions

The pkgutil module should be updated to handle this specification
appropriately, including any necessary changes to extend_path(),
iter_modules(), etc.

Specifically, the proposed changes and additions to pkgutil are:

- A new extend_virtual_paths(path_entry) function, to extend existing,
  already-imported virtual packages' __path__ attributes to include
  any portions found in a new sys.path entry. This function should be
  called by applications extending sys.path at runtime, e.g. when
  adding a plugin directory or an egg to the path. (A sketch of this
  function appears after this list.)

  The implementation of this function does a simple top-down traversal
  of sys.virtual_package_paths, and performs any necessary
  get_subpath() calls to identify what path entries need to be added
  to the virtual path for that package, given that path_entry has been
  added to sys.path. (Or, in the case of sub-packages, adding a
  derived subpath entry, based on their parent package's virtual
  path.)

  (Note: this function must update both the path values in
  sys.virtual_package_paths as well as the __path__ attributes of any
  corresponding modules in sys.modules, even though in the common case
  they will both be the same list object.)

- A new iter_virtual_packages(parent='') function to allow top-down
  traversal of virtual packages from sys.virtual_package_paths, by
  yielding the child virtual packages of parent. For example, calling
  iter_virtual_packages("zope") might yield zope.app and zope.products
  (if they are virtual packages listed in sys.virtual_package_paths),
  but not zope.foo.bar.
  (This function is needed to implement extend_virtual_paths(), but is
  also potentially useful for other code that needs to inspect imported
  virtual packages.)

- ImpImporter.iter_modules() should be changed to also detect and
  yield the names of modules found in virtual packages.
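The extend_virtual_paths() behaviour described above could be sketched
roughly as follows, for top-level virtual packages only (the derived
subpath handling for sub-packages is omitted for brevity):

    import pkgutil
    import sys

    def extend_virtual_paths(path_entry):
        importer = pkgutil.get_importer(path_entry)
        if not hasattr(importer, 'get_subpath'):
            return
        for name, path in sys.virtual_package_paths.items():
            if '.' in name:
                continue   # sub-packages derive from their parent's path
            subpath = importer.get_subpath(name)
            if subpath is not None and subpath not in path:
                # In the common case this list object is also the
                # __path__ of the module in sys.modules.
                path.append(subpath)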
In addition to the above changes, the zipimport importer should have its
iter_modules() implementation similarly changed. (Note: current versions
of Python implement this via a shim in pkgutil, so technically this is
also a change to pkgutil.)

Last, but not least, the imp module (or importlib, if appropriate)
should expose the algorithm described in the virtual paths section
above, as a get_virtual_path(modulename, parent_path=None) function, so
that creators of __import__ replacements can use it.

Implementation Notes

For users, developers, and distributors of virtual packages:

- While virtual packages are easy to set up and use, there is still a
  time and place for using self-contained packages. While it's not
  strictly necessary, adding an __init__ module to your self-contained
  packages lets users of the package (and Python itself) know that all
  of the package's code will be found in that single subdirectory. In
  addition, it lets you define __all__, expose a public API, provide a
  package-level docstring, and do other things that make more sense
  for a self-contained project than for a mere "namespace" package.

- sys.virtual_package_paths is allowed to contain entries for
  non-existent or not-yet-imported package names; code that uses its
  contents should not assume that every key in this dictionary is also
  present in sys.modules or that importing the name will necessarily
  succeed.

- If you are changing a currently self-contained package into a
  virtual one, it's important to note that you can no longer use its
  __file__ attribute to locate data files stored in a package
  directory. Instead, you must search __path__ or use the __file__ of
  a submodule adjacent to the desired files, or of a self-contained
  subpackage that contains the desired files.

  (Note: this caveat is already true for existing users of "namespace
  packages" today. That is, it is an inherent result of being able to
  partition a package, that you must know which partition the desired
  data file lives in. We mention it here simply so that new users
  converting from self-contained to virtual packages will also be
  aware of it.)

- XXX what is the __file__ of a "pure virtual" package? None? Some
  arbitrary string? The path of the first directory with a trailing
  separator? No matter what we put, some code is going to break, but
  the last choice might allow some code to accidentally work. Is that
  good or bad?

For those implementing PEP 302 importer objects:

- Importers that support the iter_modules() method (used by pkgutil to
  locate importable modules and packages) and want to add virtual
  package support should modify their iter_modules() method so that it
  discovers and lists virtual packages as well as standard modules and
  packages. To do this, the importer should simply list all immediate
  subdirectory names in its jurisdiction that are valid Python
  identifiers.

  XXX This might list a lot of not-really-packages. Should we require
  importable contents to exist? If so, how deep do we search, and how
  do we prevent e.g. link loops, or traversing onto different
  filesystems, etc.? Ick. Also, if virtual packages are listed, they
  still can't be imported, which is a problem for the way that
  pkgutil.walk_modules() is currently implemented.

- "Meta" importers (i.e., importers placed on sys.meta_path) do not
  need to implement get_subpath(), because the method is only called
  on importers corresponding to sys.path entries and __path__ entries.
  If a meta importer wishes to support virtual packages, it must do so
  entirely within its own find_module() implementation.

  Unfortunately, it is unlikely that any such implementation will be
  able to merge its package subpaths with those of other meta
  importers or sys.path importers, so the meaning of "supporting
  virtual packages" for a meta importer is currently undefined!

  (However, since the intended use case for meta importers is to
  replace Python's normal import process entirely for some subset of
  modules, and the number of such importers currently implemented is
  quite small, this seems unlikely to be a big issue in practice.)

References

[1] Namespace Packages resolution
(https://mail.python.org/pipermail/import-sig/2012-March/000421.html)

[2] "namespace" vs "module" packages (mailing list thread)
(http://mail.zope.org/pipermail/zope3-dev/2002-December/004251.html)

[3] "Dropping __init__.py requirement for subpackages"
(https://mail.python.org/pipermail/python-dev/2006-April/064400.html)

Copyright

This document has been placed in the public domain.


PEP: 295
Title: Interpretation of multiline string constants
Author: Stepan Koltsov
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 22-Jul-2002
Python-Version: 3.0
Post-History:

Abstract

This PEP describes an interpretation of multiline string constants for
Python. It suggests stripping spaces after newlines and stripping a
newline if it is the first character after an opening quotation.

Rationale

This PEP proposes an interpretation of multiline string constants in
Python.
Currently, the value of a string constant is all the text between
quotations, maybe with escape sequences substituted, e.g.:

    def f():
        """
        la-la-la
        limona, banana
        """

    def g():
        return "This is \
    string"

    print repr(f.__doc__)
    print repr(g())

prints:

    '\n\tla-la-la\n\tlimona, banana\n\t'
    'This is \tstring'

This PEP suggests two things:

- ignore the first character after the opening quotation, if it is a
  newline
- ignore in string constants all spaces and tabs up to the first
  non-whitespace character, but no more than the current indentation.

After applying this, the previous program will print:

    'la-la-la\nlimona, banana\n'
    'This is string'

To get this result, the previous programs could be rewritten for
current Python as (note, this gives the same result with the new
string meaning):

    def f():
        """\
        la-la-la
        limona, banana
        """

    def g():
        "This is \
    string"

Or stripping can be done with library routines at runtime (as pydoc
does), but this decreases program readability.
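For instance, textwrap.dedent() from the standard library can perform
such stripping at runtime; only the extra lstrip('\n') call is needed
to drop the newline right after the opening quotation:

    import textwrap

    def f():
        """
        la-la-la
        limona, banana
        """

    # Removes the common leading whitespace, then the leading newline.
    print(repr(textwrap.dedent(f.__doc__).lstrip('\n')))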
Implementation

I'll say nothing about CPython, Jython or Python.NET.

In original Python, there is no info about the current indentation (in
spaces) at compile time, so space and tab stripping should be done at
parse time. Currently no flags can be passed to the parser in program
text (like from __future__ import xxx). I suggest enabling or disabling
this feature at Python compile time depending on the CPP flag
Py_PARSE_MULTILINE_STRINGS.

Alternatives

The new interpretation of string constants can be implemented with
flags 'i' and 'o' to string constants, like:

    i"""
    SELECT * FROM car
    WHERE model = 'i525'
    """ is in new style,

    o"""SELECT * FROM employee
    WHERE birth < 1982
    """ is in old style, and

    """
    SELECT employee.name, car.name, car.price FROM employee, car
    WHERE employee.salary * 36 > car.price
    """ is in new style after Python-x.y.z and in old style otherwise.

Also this feature can be disabled if the string is raw, i.e. if the
flag 'r' is specified.

Copyright

This document has been placed in the Public Domain.


PEP: 414
Title: Explicit Unicode Literal for Python 3.3
Author: Armin Ronacher <armin.ronacher@active-4.com>, Alyssa Coghlan
<ncoghlan@gmail.com>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 15-Feb-2012
Python-Version: 3.3
Post-History: 28-Feb-2012, 04-Mar-2012
Resolution:
https://mail.python.org/pipermail/python-dev/2012-February/116995.html

Abstract

This document proposes the reintegration of an explicit unicode literal
from Python 2.x to the Python 3.x language specification, in order to
reduce the volume of changes needed when porting Unicode-aware Python 2
applications to Python 3.

BDFL Pronouncement

This PEP has been formally accepted for Python 3.3:

    I'm accepting the PEP. It's about as harmless as they come. Make it
    so.

Proposal

This PEP proposes that Python 3.3 restore support for Python 2's Unicode
literal syntax, substantially increasing the number of lines of existing
Python 2 code in Unicode aware applications that will run without
modification on Python 3.

Specifically, the Python 3 definition for string literal prefixes will
be expanded to allow:

    "u" | "U"

in addition to the currently supported:

    "r" | "R"

The following will all denote ordinary Python 3 strings:

    'text'
    "text"
    '''text'''
    """text"""
    u'text'
    u"text"
    u'''text'''
    u"""text"""
    U'text'
    U"text"
    U'''text'''
    U"""text"""

No changes are proposed to Python 3's actual Unicode handling, only to
the acceptable forms for string literals.

Exclusion of "Raw" Unicode Literals

Python 2 supports a concept of "raw" Unicode literals that don't meet
the conventional definition of a raw string: \uXXXX and \UXXXXXXXX
escape sequences are still processed by the compiler and converted to
the appropriate Unicode code points when creating the associated Unicode
objects.

Python 3 has no corresponding concept - the compiler performs no
preprocessing of the contents of raw string literals. This matches the
behaviour of 8-bit raw string literals in Python 2.

Since such strings are rarely used and would be interpreted differently
in Python 3 if permitted, it was decided that leaving them out entirely
was a better choice. Code which uses them will thus still fail
immediately on Python 3 (with a Syntax Error), rather than potentially
producing different output.

To get equivalent behaviour that will run on both Python 2 and Python 3,
either an ordinary Unicode literal can be used (with appropriate
additional escaping within the string), or else string concatenation or
string formatting can be used to combine the raw portions of the string
with those that require the use of Unicode escape sequences.
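For example, the following runs unchanged on both Python 2 and Python
3.3+ (the pattern itself is purely illustrative):

    # Python 2's ur'\d+\s+\u00e9' has no direct Python 3 spelling, but
    # concatenating a raw literal with an ordinary one is portable:
    pattern = r'\d+\s+' + u'\u00e9'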
Note that when using from __future__ import unicode_literals in Python
2, the nominally "raw" Unicode string literals will process \uXXXX and
\UXXXXXXXX escape sequences, just like Python 2 strings explicitly
marked with the "raw Unicode" prefix.

Author's Note

This PEP was originally written by Armin Ronacher, and Guido's approval
was given based on that version.

The currently published version has been rewritten by Alyssa Coghlan to
include additional historical details and rationale that were taken into
account when Guido made his decision, but were not explicitly documented
in Armin's version of the PEP.

Readers should be aware that many of the arguments in this PEP are not
technical ones. Instead, they relate heavily to the social and personal
aspects of software development.

Rationale

With the release of a Python 3 compatible version of the Web Services
Gateway Interface (WSGI) specification (PEP 3333) for Python 3.2, many
parts of the Python web ecosystem have been making a concerted effort to
support Python 3 without adversely affecting their existing developer
and user communities.

One major item of feedback from key developers in those communities,
including Chris McDonough (WebOb, Pyramid), Armin Ronacher (Flask,
Werkzeug), Jacob Kaplan-Moss (Django) and Kenneth Reitz (requests) is
that the requirement to change the spelling of every Unicode literal in
an application (regardless of how that is accomplished) is a key
stumbling block for porting efforts.

In particular, unlike many of the other Python 3 changes, it isn't one
that framework and library authors can easily handle on behalf of their
users. Most of those users couldn't care less about the "purity" of the
Python language specification; they just want their websites and
applications to work as well as possible.

While it is the Python web community that has been most vocal in
highlighting this concern, it is expected that other highly Unicode
aware domains (such as GUI development) may run into similar issues as
they (and their communities) start making concerted efforts to support
Python 3.

Common Objections

Complaint: This PEP may harm adoption of Python 3.2

This complaint is interesting, as it carries within it a tacit admission
that this PEP will make it easier to port Unicode aware Python 2
applications to Python 3.

There are many existing Python communities that are prepared to put up
with the constraints imposed by the existing suite of porting tools, or
to update their Python 2 code bases sufficiently that the problems are
minimised.

This PEP is not for those communities. Instead, it is designed
specifically to help people that don't want to put up with those
difficulties.

However, since the proposal is for a comparatively small tweak to the
language syntax with no semantic changes, it is feasible to support it
as a third party import hook. While such an import hook imposes some
import time overhead, and requires additional steps from each
application that needs it to get the hook in place, it allows
applications that target Python 3.2 to use libraries and frameworks that
would otherwise only run on Python 3.3+ due to their use of unicode
literal prefixes.

One such import hook project is Vinay Sajip's uprefix[1].

For those that prefer to translate their code in advance rather than
converting on the fly at import time, Armin Ronacher is working on a
hook that runs at install time rather than during import[2].

Combining the two approaches is of course also possible. For example,
the import hook could be used for rapid edit-test cycles during local
development, but the install hook for continuous integration tasks and
deployment on Python 3.2.

The approaches described in this section may prove useful, for example,
for applications that wish to target Python 3 on the Ubuntu 12.04 LTS
release, which will ship with Python 2.7 and 3.2 as officially supported
Python versions.

Complaint: Python 3 shouldn't be made worse just to support porting from
Python 2

This is indeed one of the key design principles of Python 3. However,
one of the key design principles of Python as a whole is that
"practicality beats purity".
If we're going to impose a significant
burden on third party developers, we should have a solid rationale for
doing so.

In most cases, the rationale for backwards incompatible Python 3 changes
is either to improve code correctness (for example, stricter default
separation of binary and text data and integer division upgrading to
floats when necessary), reduce typical memory usage (for example,
increased usage of iterators and views over concrete lists), or to
remove distracting nuisances that make Python code harder to read
without increasing its expressiveness (for example, the comma based
syntax for naming caught exceptions). Changes backed by such reasoning
are not going to be reverted, regardless of objections from Python 2
developers attempting to make the transition to Python 3.

In many cases, Python 2 offered two ways of doing things for historical
reasons. For example, inequality could be tested with both != and <> and
integer literals could be specified with an optional L suffix. Such
redundancies have been eliminated in Python 3, which reduces the overall
size of the language and improves consistency across developers.

In the original Python 3 design (up to and including Python 3.2), the
explicit prefix syntax for unicode literals was deemed to fall into this
category, as it is completely unnecessary in Python 3. However, the
difference between those other cases and unicode literals is that the
unicode literal prefix is not redundant in Python 2 code: it is a
programmatically significant distinction that needs to be preserved in
some fashion to avoid losing information.

While porting tools were created to help with the transition (see the
next section), it still creates an additional burden on heavy users of
unicode strings in Python 2, solely so that future developers learning
Python 3 don't need to be told "For historical reasons, string literals
may have an optional u or U prefix. Never use this yourselves, it's just
there to help with porting from an earlier version of the language."

Plenty of students learning Python 2 received similar warnings regarding
string exceptions without being confused or irreparably stunted in their
growth as Python developers. It will be the same with this feature.

This point is further reinforced by the fact that Python 3 still allows
the uppercase variants of the B and R prefixes for bytes literals and
raw bytes and string literals. If the potential for confusion due to
string prefix variants is that significant, where was the outcry asking
that these redundant prefixes be removed along with all the other
redundancies that were eliminated in Python 3?

Just as support for string exceptions was eliminated from Python 2 using
the normal deprecation process, support for redundant string prefix
characters (specifically, B, R, u, U) may eventually be eliminated from
Python 3, regardless of the current acceptance of this PEP.
In the original Python 3 design (up to and including Python 3.2), the explicit prefix syntax for unicode literals was deemed to fall into this category, as it is completely unnecessary in Python 3. However, the difference between those other cases and unicode literals is that the unicode literal prefix is not redundant in Python 2 code: it is a programmatically significant distinction that needs to be preserved in some fashion to avoid losing information.

While porting tools were created to help with the transition (see next section) it still creates an additional burden on heavy users of unicode strings in Python 2, solely so that future developers learning Python 3 don't need to be told "For historical reasons, string literals may have an optional u or U prefix. Never use this yourselves, it's just there to help with porting from an earlier version of the language."

Plenty of students learning Python 2 received similar warnings regarding string exceptions without being confused or irreparably stunted in their growth as Python developers. It will be the same with this feature.

This point is further reinforced by the fact that Python 3 still allows the uppercase variants of the B and R prefixes for bytes literals and raw bytes and string literals. If the potential for confusion due to string prefix variants is that significant, where was the outcry asking that these redundant prefixes be removed along with all the other redundancies that were eliminated in Python 3?

Just as support for string exceptions was eliminated from Python 2 using the normal deprecation process, support for redundant string prefix characters (specifically, B, R, u, U) may eventually be eliminated from Python 3, regardless of the current acceptance of this PEP. However, such a change will likely only occur once third party libraries supporting Python 2.7 are about as common as libraries supporting Python 2.2 or 2.3 are today.

Complaint: The WSGI "native strings" concept is an ugly hack

One reason the removal of unicode literals has provoked such concern amongst the web development community is that the updated WSGI specification had to make a few compromises to minimise the disruption for existing web servers that provide a WSGI-compatible interface (this was deemed necessary in order to make the updated standard a viable target for web application authors and web framework developers).

One of those compromises is the concept of a "native string". WSGI defines three different kinds of string:

- text strings: handled as unicode in Python 2 and str in Python 3
- native strings: handled as str in both Python 2 and Python 3
- binary data: handled as str in Python 2 and bytes in Python 3

Some developers consider WSGI's "native strings" to be an ugly hack, as they are explicitly documented as being used solely for latin-1 decoded "text", regardless of the actual encoding of the underlying data. Using this approach bypasses many of the updates to Python 3's data model that are designed to encourage correct handling of text encodings. However, it generally works due to the specific details of the problem domain - web server and web framework developers are some of the individuals most aware of how blurry the line can get between binary data and text when working with HTTP and related protocols, and how important it is to understand the implications of the encodings in use when manipulating encoded text data. At the application level most of these details are hidden from the developer by the web frameworks and support libraries (both in Python 2 and in Python 3).

In practice, native strings are a useful concept because there are some APIs (both in the standard library and in third party frameworks and packages) and some internal interpreter details that are designed primarily to work with str. These components often don't support unicode in Python 2 or bytes in Python 3, or, if they do, require additional encoding details and/or impose constraints that don't apply to the str variants.

Some examples of interfaces that are best handled by using actual str instances are:

- Python identifiers (as attributes, dict keys, class names, module names, import references, etc)
- URLs for the most part as well as HTTP headers in urllib/http servers
- WSGI environment keys and CGI-inherited values
- Python source code for dynamic compilation and AST hacks
- Exception messages
- __repr__ return value
- preferred filesystem paths
- preferred OS environment

In Python 2.6 and 2.7, these distinctions are most naturally expressed as follows:

- u"": text string (unicode)
- "": native string (str)
- b"": binary data (str, also aliased as bytes)

In Python 3, the latin-1 decoded native strings are not distinguished from any other text strings:

- "": text string (str)
- "": native string (str)
- b"": binary data (bytes)

If from __future__ import unicode_literals is used to modify the behaviour of Python 2, then, along with an appropriate definition of n(), the distinction can be expressed as:

- "": text string
- n(""): native string
- b"": binary data

(While n=str works for simple cases, it can sometimes have problems due to non-ASCII source encodings)

In the common subset of Python 2 and Python 3 (with appropriate specification of a source encoding and definitions of the u() and b() helper functions), they can be expressed as:

- u(""): text string
- "": native string
- b(""): binary data

That last approach is the only variant that supports Python 2.5 and earlier.
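A minimal sketch of such helper functions is shown below; real-world versions (such as those provided by the six library) handle additional corner cases:

    import sys

    if sys.version_info[0] >= 3:
        def u(s):
            return s                      # text is the default in Python 3
        def b(s):
            return s.encode("latin-1")    # binary data
    else:
        def u(s):
            return unicode(s, "unicode_escape")   # text string
        def b(s):
            return s                      # str is already binary data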
Of all the alternatives, the format currently supported in Python 2.6 and 2.7 is by far the cleanest approach that clearly distinguishes the three desired kinds of behaviour. With this PEP, that format will also be supported in Python 3.3+. It will also be supported in Python 3.1 and 3.2 through the use of import and install hooks. While it is significantly less likely, it is also conceivable that the hooks could be adapted to allow the use of the b prefix on Python 2.5.

Complaint: The existing tools should be good enough for everyone

A commonly expressed sentiment from developers that have already successfully ported applications to Python 3 is along the lines of "if you think it's hard, you're doing it wrong" or "it's not that hard, just try it!". While it is no doubt unintentional, these responses all have the effect of telling the people that are pointing out inadequacies in the current porting toolset "there's nothing wrong with the porting tools, you just suck and don't know how to use them properly".

These responses are a case of completely missing the point of what people are complaining about. The feedback that resulted in this PEP isn't due to people complaining that ports aren't possible. Instead, the feedback is coming from people that have successfully completed ports and are objecting that they found the experience thoroughly unpleasant for the class of application that they needed to port (specifically, Unicode aware web frameworks and support libraries).

This is a subjective appraisal, and it's the reason why the Python 3 porting tools ecosystem is a case where the "one obvious way to do it" philosophy emphatically does not apply. While it was originally intended that "develop in Python 2, convert with 2to3, test both" would be the standard way to develop for both versions in parallel, in practice, the needs of different projects and developer communities have proven to be sufficiently diverse that a variety of approaches have been devised, allowing each group to select an approach that best fits their needs.

Lennart Regebro has produced an excellent overview of the available migration strategies[3], and a similar review is provided in the official porting guide[4]. (Note that the official guidance has softened to "it depends on your specific situation" since Lennart wrote his overview).

However, both of those guides are written from the founding assumption that all of the developers involved are already committed to the idea of supporting Python 3. They make no allowance for the social aspects of such a change when you're interacting with a user base that may not be especially tolerant of disruptions without a clear benefit, or are trying to persuade Python 2 focused upstream developers to accept patches that are solely about improving Python 3 forward compatibility.

With the current porting toolset, every migration strategy will result in changes to every Unicode literal in a project. No exceptions. They will be converted to either an unprefixed string literal (if the project decides to adopt the unicode_literals import) or else to a converter call like u("text").

If the unicode_literals import approach is employed, but is not adopted across the entire project at the same time, then the meaning of a bare string literal may become annoyingly ambiguous. This problem can be particularly pernicious for aggregated software, like a Django site - in such a situation, some files may end up using the unicode_literals import and others may not, creating definite potential for confusion.
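The ambiguity is easy to demonstrate at a Python 2.7 interactive prompt, since the future import affects all subsequently compiled code:

    >>> type("")
    <type 'str'>
    >>> from __future__ import unicode_literals
    >>> type("")
    <type 'unicode'>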
While these problems are clearly solvable at a technical level, they're a completely unnecessary distraction at the social level. Developer energy should be reserved for addressing real technical difficulties associated with the Python 3 transition (like distinguishing their 8-bit text strings from their binary data). They shouldn't be punished with additional code changes (even automated ones) solely due to the fact that they have already explicitly identified their Unicode strings in Python 2.

Armin Ronacher has created an experimental extension to 2to3 which only modernizes Python code to the extent that it runs on Python 2.7 or later with support from the cross-version compatibility six library. This tool is available as python-modernize[5].
Currently, the deltas generated by this tool will affect every Unicode literal in the converted source. This will create legitimate concerns amongst upstream developers asked to accept such changes, and amongst framework users being asked to change their applications.

However, by eliminating the noise from changes to the Unicode literal syntax, many projects could be cleanly and (comparatively) non-controversially made forward compatible with Python 3.3+ just by running python-modernize and applying the recommended changes.

References

[1] uprefix import hook project (https://bitbucket.org/vinay.sajip/uprefix)

[2] install hook to remove unicode string prefix characters (https://github.com/mitsuhiko/unicode-literals-pep/tree/master/install-hook)

[3] Porting to Python 3: Migration Strategies (http://python3porting.com/strategies.html)

[4] Porting Python 2 Code to Python 3 (http://docs.python.org/howto/pyporting.html)

[5] Python-Modernize (http://github.com/mitsuhiko/python-modernize)

Copyright

This document has been placed in the public domain.

PEP: 3152
Title: Cofunctions
Version: $Revision$
Last-Modified: $Date$
Author: Gregory Ewing
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 13-Feb-2009
Python-Version: 3.3
Post-History:

Abstract

A syntax is proposed for defining and calling a special type of generator called a 'cofunction'. It is designed to provide a streamlined way of writing generator-based coroutines, and allow the early detection of certain kinds of error that are easily made when writing such code, which otherwise tend to cause hard-to-diagnose symptoms.

This proposal builds on the 'yield from' mechanism described in PEP 380, and describes some of the semantics of cofunctions in terms of it. However, it would be possible to define and implement cofunctions independently of PEP 380 if so desired.

Rejection

See https://mail.python.org/pipermail/python-dev/2015-April/139503.html

Specification

Cofunction definitions

A new keyword codef is introduced which is used in place of def to define a cofunction. A cofunction is a special kind of generator having the following characteristics:

1. A cofunction is always a generator, even if it does not contain any yield or yield from expressions.
2. A cofunction cannot be called the same way as an ordinary function. An exception is raised if an ordinary call to a cofunction is attempted.

Cocalls

Calls from one cofunction to another are made by marking the call with a new keyword cocall. The expression:

    cocall f(*args, **kwds)

is semantically equivalent to:

    yield from f.__cocall__(*args, **kwds)

except that the object returned by __cocall__ is expected to be an iterator, so the step of calling iter() on it is skipped.
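For illustration, here is a small (hypothetical) cofunction written in the proposed syntax; note that neither codef nor cocall exists in any released version of Python:

    codef read_record(conn, size):
        header = cocall conn.read(4)
        body = cocall conn.read(size)
        return header + body

    # Each cocall above expands to (approximately):
    #     yield from conn.read.__cocall__(...)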
The full syntax of a cocall expression is described by the following grammar lines:

    atom: cocall | <existing alternatives for atom>
    cocall: 'cocall' atom cotrailer* '(' [arglist] ')'
    cotrailer: '[' subscriptlist ']' | '.' NAME

The cocall keyword is syntactically valid only inside a cofunction. A SyntaxError will result if it is used in any other context.

Objects which implement __cocall__ are expected to return an object obeying the iterator protocol. Cofunctions respond to __cocall__ the same way as ordinary generator functions respond to __call__, i.e. by returning a generator-iterator.

Certain objects that wrap other callable objects, notably bound methods, will be given __cocall__ implementations that delegate to the underlying object.

New builtins, attributes and C API functions

To facilitate interfacing cofunctions with non-coroutine code, there will be a built-in function costart whose definition is equivalent to:

    def costart(obj, *args, **kwds):
        return obj.__cocall__(*args, **kwds)

There will also be a corresponding C API function:

    PyObject *PyObject_CoCall(PyObject *obj, PyObject *args, PyObject *kwds)

It is left unspecified for now whether a cofunction is a distinct type of object or, like a generator function, is simply a specially-marked function instance. If the latter, a read-only boolean attribute __iscofunction__ should be provided to allow testing whether a given function object is a cofunction.
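A sketch of how non-coroutine code might drive a cofunction to completion using costart, stepping the returned iterator by hand (a real application would more likely hand the iterator to a scheduler):

    def run(cofunction, *args, **kwds):
        it = costart(cofunction, *args, **kwds)
        try:
            while True:
                next(it)        # advance to the next suspension point
        except StopIteration as e:
            return e.value      # the cofunction's return value (PEP 380)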
If the rules are\nviolated, an exception is raised that points out exactly what and where\nthe problem is.\n\nLastly, the need for dummy yields is eliminated by making the form of\ndefinition determine whether the function is a coroutine, rather than\nwhat it contains.\n\nPrototype Implementation\n\nAn implementation in the form of patches to Python 3.1.2 can be found\nhere:\n\nhttp://www.cosc.canterbury.ac.nz/greg.ewing/python/generators/cofunctions.html\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.775058"},"created":{"kind":"timestamp","value":"2009-02-13T00:00:00","string":"2009-02-13T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-3152/\",\n \"authors\": [\n \"Gregory Ewing\"\n ],\n \"pep_number\": \"3152\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":582,"cells":{"id":{"kind":"string","value":"0364"},"text":{"kind":"string","value":"PEP: 364 Title: Transitioning to the Py3K Standard Library Version:\n$Revision$ Last-Modified: $Date$ Author: Barry Warsaw \nStatus: Withdrawn Type: Standards Track Content-Type: text/x-rst\nCreated: 01-Mar-2007 Python-Version: 2.6 Post-History:\n\nAbstract\n\nPEP 3108 describes the reorganization of the Python standard library for\nthe Python 3.0 release. This PEP describes a mechanism for transitioning\nfrom the Python 2.x standard library to the Python 3.0 standard library.\nThis transition will allow and encourage Python programmers to use the\nnew Python 3.0 library names starting with Python 2.6, while maintaining\nthe old names for backward compatibility. In this way, a Python\nprogrammer will be able to write forward compatible code without\nsacrificing interoperability with existing Python programs.\n\nRationale\n\nPEP 3108 presents a rationale for Python standard library (stdlib)\nreorganization. The reader is encouraged to consult that PEP for details\nabout why and how the library will be reorganized. Should PEP 3108 be\naccepted in part or in whole, then it is advantageous to allow Python\nprogrammers to begin the transition to the new stdlib module names in\nPython 2.x, so that they can write forward compatible code starting with\nPython 2.6.\n\nNote that PEP 3108 proposes to remove some \"silly old stuff\", i.e.\nmodules that are no longer useful or necessary. The PEP you are reading\ndoes not address this because there are no forward compatibility issues\nfor modules that are to be removed, except to stop using such modules.\n\nThis PEP concerns only the mechanism by which mappings from old stdlib\nnames to new stdlib names are maintained. Please consult PEP 3108 for\nall specific module renaming proposals. Specifically see the section\ntitled Modules to Rename for guidelines on the old name to new name\nmappings. 
The few examples in this PEP are given for illustrative purposes only and should not be used for specific renaming recommendations.

Supported Renamings

There are at least 4 use cases explicitly supported by this PEP:

- Simple top-level package name renamings, such as StringIO to stringio;
- Sub-package renamings where the package name may or may not be renamed, such as email.MIMEText to email.mime.text;
- Extension module renaming, such as cStringIO to cstringio;
- Third party renaming of any of the above.

Two use cases supported by this PEP include renaming simple top-level modules, such as StringIO, as well as modules within packages, such as email.MIMEText.

In the former case, PEP 3108 currently recommends StringIO be renamed to stringio, following PEP 8 recommendations.

In the latter case, the email 4.0 package distributed with Python 2.5 already renamed email.MIMEText to email.mime.text, although it did so in a one-off, uniquely hackish way inside the email package. The mechanism described in this PEP is general enough to handle all module renamings, obviating the need for the Python 2.5 hack (except for backward compatibility with earlier Python versions).

An additional use case is to support the renaming of C extension modules. As long as the new name for the C module is importable, it can be remapped to the new name. E.g. cStringIO renamed to cstringio.

Third party package renaming is also supported, via several public interfaces accessible by any Python module.

Remappings are not performed recursively.

.mv files

Remapping files are called .mv files; the suffix was chosen to be evocative of the Unix mv(1) command. An .mv file is a simple line-oriented text file. All blank lines and lines that start with a # are ignored. All other lines must contain two whitespace separated fields. The first field is the old module name, and the second field is the new module name. Both module names must be specified using their full dotted-path names. Here is an example .mv file from Python 2.6:

    # Map the various string i/o libraries to their new names
    StringIO stringio
    cStringIO cstringio

.mv files can appear anywhere in the file system, and there is a programmatic interface provided to parse them, and register the remappings inside them. By default, when Python starts up, all the .mv files in the oldlib package are read, and their remappings are automatically registered. This is where all the module remappings should be specified for top-level Python 2.x standard library modules.

Implementation Specification

This section provides the full specification for how module renamings in Python 2.x are implemented. The central mechanism relies on various import hooks as described in PEP 302. Specifically sys.path_importer_cache, sys.path, and sys.meta_path are all employed to provide the necessary functionality.

When Python's import machinery is initialized, the oldlib package is imported. Inside oldlib there is a class called OldStdlibLoader. This class implements the PEP 302 interface and is automatically instantiated, with zero arguments. The constructor reads all the .mv files from the oldlib package directory, automatically registering all the remappings found in those .mv files. This is how the Python 2.x standard library is remapped.

The OldStdlibLoader class should not be instantiated by other Python modules. Instead, you can access the global OldStdlibLoader instance via the sys.stdlib_remapper instance. Use this instance if you want programmatic access to the remapping machinery.

One important implementation detail: as needed by the PEP 302 API, a magic string is added to sys.path, and module __path__ attributes in order to hook in our remapping loader. This magic string is currently <oldlib> and some changes were necessary to Python's site.py file in order to treat all sys.path entries starting with < as special. Specifically, no attempt is made to make them absolute file names (since they aren't file names at all).

In order for the remapping import hooks to work, the module or package must be physically located under its new name. This is because the import hooks catch only modules that are not already imported, and cannot be imported by Python's built-in import rules. Thus, if a module has been moved, say from Lib/StringIO.py to Lib/stringio.py, and the former's .pyc file has been removed, then without the remapper, this would fail:

    import StringIO

Instead, with the remapper, this failing import will be caught, the old name will be looked up in the registered remappings, and in this case, the new name stringio will be found. The remapper then attempts to import the new name, and if that succeeds, it binds the resulting module into sys.modules, under both the old and new names. Thus, the above import will result in entries in sys.modules for 'StringIO' and 'stringio', and both will point to the exact same module object.
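The core of this behaviour can be sketched with the PEP 302 meta_path protocol of that era (an illustration only; the actual OldStdlibLoader also handles .mv file parsing and the magic path entries):

    import sys

    class RenameImporter(object):
        """Redirect imports of old module names to their new names."""

        def __init__(self, mapping):
            self.mapping = mapping      # e.g. {'StringIO': 'stringio'}

        def find_module(self, fullname, path=None):
            if fullname in self.mapping:
                return self
            return None

        def load_module(self, fullname):
            newname = self.mapping[fullname]
            __import__(newname)
            # Bind the module under both the old and the new name.
            module = sys.modules[fullname] = sys.modules[newname]
            return module

    sys.meta_path.append(RenameImporter({'StringIO': 'stringio'}))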
Note that no way to disable the remapping machinery is proposed, short of moving all the .mv files away or programmatically removing them in some custom start up code. In Python 3.0, the remappings will be eliminated, leaving only the "new" names.

Programmatic Interface

Several methods are added to the sys.stdlib_remapper object, which third party packages can use to register their own remappings. Note however that in all cases, there is one and only one mapping from an old name to a new name. If two .mv files contain different mappings for an old name, or if a programmatic call is made with an old name that is already remapped, the previous mapping is lost. This will not affect any already imported modules.

The following methods are available on the sys.stdlib_remapper object:

- read_mv_file(filename) -- Read the given file and register all remappings found in the file.
- read_directory_mv_files(dirname, suffix='.mv') -- List the given directory, reading all files in that directory that have the matching suffix (.mv by default). For each parsed file, register all the remappings found in that file.
- set_mapping(oldname, newname) -- Register a new mapping from an old module name to a new module name. Both must be the full dotted-path name to the module. newname may be None in which case any existing mapping for oldname will be removed (it is not an error if there is no existing mapping).
- get_mapping(oldname, default=None) -- Return any registered newname for the given oldname. If there is no registered remapping, default is returned.
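For example, a third party package could register and inspect its own renaming like this (hypothetical usage of the proposed API; the module names are invented):

    import sys

    sys.stdlib_remapper.set_mapping('mypkg.OldName', 'mypkg.new_name')
    sys.stdlib_remapper.get_mapping('mypkg.OldName')    # -> 'mypkg.new_name'
    sys.stdlib_remapper.set_mapping('mypkg.OldName', None)   # remove it again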
Compare what the\n email package currently has to do if we place its .mv file in the\n email package instead of in the oldlib package:\n\n # Expose old names\n import os, sys\n sys.stdlib_remapper.read_directory_mv_files(os.path.dirname(__file__))\n\n I think we should automatically read a package's directory for any\n .mv files it might contain.\n\nReference Implementation\n\nA reference implementation, in the form of a patch against the current\n(as of this writing) state of the Python 2.6 svn trunk, is available as\nSourceForge patch #1675334[1]. Note that this patch includes a rename of\ncStringIO to cstringio, but this is primarily for illustrative and unit\ntesting purposes. Should the patch be accepted, we might want to split\nthis change off into other PEP 3108 changes.\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:\n\n[1] Reference implementation (http://bugs.python.org/issue1675334)"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.787942"},"created":{"kind":"timestamp","value":"2007-03-01T00:00:00","string":"2007-03-01T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0364/\",\n \"authors\": [\n \"Barry Warsaw\"\n ],\n \"pep_number\": \"0364\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":583,"cells":{"id":{"kind":"string","value":"0318"},"text":{"kind":"string","value":"PEP: 318 Title: Decorators for Functions and Methods Version: $Revision$\nLast-Modified: $Date$ Author: Kevin D. Smith\n, Jim J. Jewett, Skip Montanaro, Anthony\nBaxter Status: Final Type: Standards Track Content-Type: text/x-rst\nCreated: 05-Jun-2003 Python-Version: 2.4 Post-History: 09-Jun-2003,\n10-Jun-2003, 27-Feb-2004, 23-Mar-2004, 30-Aug-2004, 02-Sep-2004\n\nWarningWarningWarning\n\nThis document is meant to describe the decorator syntax and the process\nthat resulted in the decisions that were made. It does not attempt to\ncover the huge number of potential alternative syntaxes, nor is it an\nattempt to exhaustively list all the positives and negatives of each\nform.\n\nAbstract\n\nThe current method for transforming functions and methods (for instance,\ndeclaring them as a class or static method) is awkward and can lead to\ncode that is difficult to understand. Ideally, these transformations\nshould be made at the same point in the code where the declaration\nitself is made. This PEP introduces new syntax for transformations of a\nfunction or method declaration.\n\nMotivation\n\nThe current method of applying a transformation to a function or method\nplaces the actual transformation after the function body. For large\nfunctions this separates a key component of the function's behavior from\nthe definition of the rest of the function's external interface. For\nexample:\n\n def foo(self):\n perform method operation\n foo = classmethod(foo)\n\nThis becomes less readable with longer methods. It also seems less than\npythonic to name the function three times for what is conceptually a\nsingle declaration. A solution to this problem is to move the\ntransformation of the method closer to the method's own declaration. 
Modifying classes in this fashion is also possible, though the benefits are not as immediately apparent. Almost certainly, anything which could be done with class decorators could be done using metaclasses, but using metaclasses is sufficiently obscure that there is some attraction to having an easier way to make simple modifications to classes. For Python 2.4, only function/method decorators are being added.

PEP 3129 proposes to add class decorators as of Python 2.6.

Why Is This So Hard?

Two decorators (classmethod() and staticmethod()) have been available in Python since version 2.2. It's been assumed since approximately that time that some syntactic support for them would eventually be added to the language. Given this assumption, one might wonder why it's been so difficult to arrive at a consensus. Discussions have raged off-and-on at times in both comp.lang.python and the python-dev mailing list about how best to implement function decorators. There is no one clear reason why this should be so, but a few problems seem to be most divisive.

- Disagreement about where the "declaration of intent" belongs. Almost everyone agrees that decorating/transforming a function at the end of its definition is suboptimal. Beyond that there seems to be no clear consensus where to place this information.
- Syntactic constraints. Python is a syntactically simple language with fairly strong constraints on what can and can't be done without "messing things up" (both visually and with regards to the language parser). There's no obvious way to structure this information so that people new to the concept will think, "Oh yeah, I know what you're doing." The best that seems possible is to keep new users from creating a wildly incorrect mental model of what the syntax means.
- Overall unfamiliarity with the concept. For people who have a passing acquaintance with algebra (or even basic arithmetic) or have used at least one other programming language, much of Python is intuitive. Very few people will have had any experience with the decorator concept before encountering it in Python. There's just no strong preexisting meme that captures the concept.
- Syntax discussions in general appear to cause more contention than almost anything else. Readers are pointed to the ternary operator discussions that were associated with PEP 308 for another example of this.

Background

There is general agreement that syntactic support is desirable to the current state of affairs. Guido mentioned syntactic support for decorators in his DevDay keynote presentation at the 10th Python Conference, though he later said it was only one of several extensions he proposed there "semi-jokingly". Michael Hudson raised the topic on python-dev shortly after the conference, attributing the initial bracketed syntax to an earlier proposal on comp.lang.python by Gareth McCaughan.

Class decorations seem like an obvious next step because class definition and function definition are syntactically similar, however Guido remains unconvinced, and class decorators will almost certainly not be in Python 2.4.

The discussion continued on and off on python-dev from February 2002 through July 2004. Hundreds and hundreds of posts were made, with people proposing many possible syntax variations. Guido took a list of proposals to EuroPython 2004, where a discussion took place. Subsequent to this, he decided that we'd have the Java-style @decorator syntax, and this appeared for the first time in 2.4a2. Barry Warsaw named this the 'pie-decorator' syntax, in honor of the Pie-thon Parrot shootout which occurred around the same time as the decorator syntax, and because the @ looks a little like a pie. Guido outlined his case on Python-dev, including this piece on some of the (many) rejected forms.

On the name 'Decorator'

There's been a number of complaints about the choice of the name 'decorator' for this feature. The major one is that the name is not consistent with its use in the GoF book. The name 'decorator' probably owes more to its use in the compiler area -- a syntax tree is walked and annotated. It's quite possible that a better name may turn up.

Design Goals

The new syntax should

- work for arbitrary wrappers, including user-defined callables and the existing builtins classmethod() and staticmethod(). This requirement also means that a decorator syntax must support passing arguments to the wrapper constructor
- work with multiple wrappers per definition
- make it obvious what is happening; at the very least it should be obvious that new users can safely ignore it when writing their own code
- be a syntax "that ... [is] easy to remember once explained"
- not make future extensions more difficult
- be easy to type; programs that use it are expected to use it very frequently
- not make it more difficult to scan through code quickly. It should still be easy to search for all definitions, a particular definition, or the arguments that a function accepts
- not needlessly complicate secondary support tools such as language-sensitive editors and other "toy parser tools out there"
- allow future compilers to optimize for decorators. With the hope of a JIT compiler for Python coming into existence at some point this tends to require the syntax for decorators to come before the function definition
- move from the end of the function, where it's currently hidden, to the front where it is more in your face

Andrew Kuchling has links to a bunch of the discussions about motivations and use cases in his blog. Particularly notable is Jim Huginin's list of use cases.
Current Syntax

The current syntax for function decorators as implemented in Python 2.4a2 is:

    @dec2
    @dec1
    def func(arg1, arg2, ...):
        pass

This is equivalent to:

    def func(arg1, arg2, ...):
        pass
    func = dec2(dec1(func))

without the intermediate assignment to the variable func. The decorators are near the function declaration. The @ sign makes it clear that something new is going on here.

The rationale for the order of application (bottom to top) is that it matches the usual order for function-application. In mathematics, composition of functions (g o f)(x) translates to g(f(x)). In Python, @g @f def foo() translates to foo = g(f(foo)).
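A tiny runnable demonstration of that ordering:

    def g(func):
        print("g applied")
        return func

    def f(func):
        print("f applied")
        return func

    @g
    @f
    def foo():
        pass

    # Prints "f applied" then "g applied": f is applied first,
    # and g then wraps f's result.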
Guido preferred this because of a gut\nfeeling.\n\nThe current syntax also allows decorator declarations to call a function\nthat returns a decorator:\n\n @decomaker(argA, argB, ...)\n def func(arg1, arg2, ...):\n pass\n\nThis is equivalent to:\n\n func = decomaker(argA, argB, ...)(func)\n\nThe rationale for having a function that returns a decorator is that the\npart after the @ sign can be considered to be an expression (though\nsyntactically restricted to just a function), and whatever that\nexpression returns is called. See declaration arguments.\n\nSyntax Alternatives\n\nThere have been a large number of different syntaxes proposed --rather\nthan attempting to work through these individual syntaxes, it's\nworthwhile to break the syntax discussion down into a number of areas.\nAttempting to discuss each possible syntax individually would be an act\nof madness, and produce a completely unwieldy PEP.\n\nDecorator Location\n\nThe first syntax point is the location of the decorators. For the\nfollowing examples, we use the @syntax used in 2.4a2.\n\nDecorators before the def statement are the first alternative, and the\nsyntax used in 2.4a2:\n\n @classmethod\n def foo(arg1,arg2):\n pass\n\n @accepts(int,int)\n @returns(float)\n def bar(low,high):\n pass\n\nThere have been a number of objections raised to this location -- the\nprimary one is that it's the first real Python case where a line of code\nhas an effect on a following line. The syntax available in 2.4a3\nrequires one decorator per line (in a2, multiple decorators could be\nspecified on the same line), and the final decision for 2.4 final stayed\none decorator per line.\n\nPeople also complained that the syntax quickly got unwieldy when\nmultiple decorators were used. The point was made, though, that the\nchances of a large number of decorators being used on a single function\nwere small and thus this was not a large worry.\n\nSome of the advantages of this form are that the decorators live outside\nthe method body -- they are obviously executed at the time the function\nis defined.\n\nAnother advantage is that a prefix to the function definition fits the\nidea of knowing about a change to the semantics of the code before the\ncode itself, thus you know how to interpret the code's semantics\nproperly without having to go back and change your initial perceptions\nif the syntax did not come before the function definition.\n\nGuido decided he preferred having the decorators on the line before the\n'def', because it was felt that a long argument list would mean that the\ndecorators would be 'hidden'\n\nThe second form is the decorators between the def and the function name,\nor the function name and the argument list:\n\n def @classmethod foo(arg1,arg2):\n pass\n\n def @accepts(int,int),@returns(float) bar(low,high):\n pass\n\n def foo @classmethod (arg1,arg2):\n pass\n\n def bar @accepts(int,int),@returns(float) (low,high):\n pass\n\nThere are a couple of objections to this form. The first is that it\nbreaks easily 'greppability' of the source -- you can no longer search\nfor 'def foo(' and find the definition of the function. 
The second, more serious, objection is that in the case of multiple decorators, the syntax would be extremely unwieldy.

The next form, which has had a number of strong proponents, is to have the decorators between the argument list and the trailing : in the 'def' line:

    def foo(arg1,arg2) @classmethod:
        pass

    def bar(low,high) @accepts(int,int),@returns(float):
        pass

Guido summarized the arguments against this form (many of which also apply to the previous form) as:

- it hides crucial information (e.g. that it is a static method) after the signature, where it is easily missed
- it's easy to miss the transition between a long argument list and a long decorator list
- it's cumbersome to cut and paste a decorator list for reuse, because it starts and ends in the middle of a line

The next form is that the decorator syntax goes inside the method body at the start, in the same place that docstrings currently live:

    def foo(arg1,arg2):
        @classmethod
        pass

    def bar(low,high):
        @accepts(int,int)
        @returns(float)
        pass

The primary objection to this form is that it requires "peeking inside" the method body to determine the decorators. In addition, even though the code is inside the method body, it is not executed when the method is run. Guido felt that docstrings were not a good counter-example, and that it was quite possible that a 'docstring' decorator could help move the docstring to outside the function body.

The final form is a new block that encloses the method's code. For this example, we'll use a 'decorate' keyword, as it makes no sense with the @syntax:

    decorate:
        classmethod
    def foo(arg1,arg2):
        pass

    decorate:
        accepts(int,int)
        returns(float)
    def bar(low,high):
        pass

This form would result in inconsistent indentation for decorated and undecorated methods. In addition, a decorated method's body would start three indent levels in.

Syntax forms

- @decorator:

      @classmethod
      def foo(arg1,arg2):
          pass

      @accepts(int,int)
      @returns(float)
      def bar(low,high):
          pass

  The major objections against this syntax are that the @ symbol is not currently used in Python (and is used in both IPython and Leo), and that the @ symbol is not meaningful. Another objection is that this "wastes" a currently unused character (from a limited set) on something that is not perceived as a major use.

- |decorator:

      |classmethod
      def foo(arg1,arg2):
          pass

      |accepts(int,int)
      |returns(float)
      def bar(low,high):
          pass

  This is a variant on the @decorator syntax -- it has the advantage that it does not break IPython and Leo. Its major disadvantage compared to the @syntax is that the | symbol looks like both a capital I and a lowercase l.

- list syntax:

      [classmethod]
      def foo(arg1,arg2):
          pass

      [accepts(int,int), returns(float)]
      def bar(low,high):
          pass

  The major objection to the list syntax is that it's currently meaningful (when used in the form before the method). It's also lacking any indication that the expression is a decorator.

- list syntax using other brackets (<...>, [[...]], ...):

      <classmethod>
      def foo(arg1,arg2):
          pass

      <accepts(int,int), returns(float)>
      def bar(low,high):
          pass

  None of these alternatives gained much traction. The alternatives which involve square brackets only serve to make it obvious that the decorator construct is not a list. They do nothing to make parsing any easier.
  The '<...>' alternative presents parsing problems because '<' and '>' already parse as un-paired. They present a further parsing ambiguity because a right angle bracket might be a greater than symbol instead of a closer for the decorators.

- decorate()

  The decorate() proposal was that no new syntax be implemented -- instead a magic function that used introspection to manipulate the following function. Both Jp Calderone and Philip Eby produced implementations of functions that did this. Guido was pretty firmly against this -- with no new syntax, the magicness of a function like this is extremely high:

  Using functions with "action-at-a-distance" through sys.settraceback may be okay for an obscure feature that can't be had any other way yet doesn't merit changes to the language, but that's not the situation for decorators. The widely held view here is that decorators need to be added as a syntactic feature to avoid the problems with the postfix notation used in 2.2 and 2.3. Decorators are slated to be an important new language feature and their design needs to be forward-looking, not constrained by what can be implemented in 2.3.

- new keyword (and block)

  This idea was the consensus alternate from comp.lang.python (more on this in Community Consensus below.) Robert Brewer wrote up a detailed J2 proposal document outlining the arguments in favor of this form. The initial issues with this form are:

  - It requires a new keyword, and therefore a from __future__ import decorators statement.
  - The choice of keyword is contentious. However using emerged as the consensus choice, and is used in the proposal and implementation.
  - The keyword/block form produces something that looks like a normal code block, but isn't. Attempts to use statements in this block will cause a syntax error, which may confuse users.

  A few days later, Guido rejected the proposal on two main grounds, firstly:

  ... the syntactic form of an indented block strongly suggests that its contents should be a sequence of statements, but in fact it is not -- only expressions are allowed, and there is an implicit "collecting" of these expressions going on until they can be applied to the subsequent function definition. ...

  and secondly:

  ... the keyword starting the line that heads a block draws a lot of attention to it. This is true for "if", "while", "for", "try", "def" and "class". But the "using" keyword (or any other keyword in its place) doesn't deserve that attention; the emphasis should be on the decorator or decorators inside the suite, since those are the important modifiers to the function definition that follows. ...

  Readers are invited to read the full response:

  http://www.aminus.org/rbre/python/pydec.html

  https://mail.python.org/pipermail/python-dev/2004-September/048518.html

- Other forms

  There are plenty of other variants and proposals on the wiki page.

Why @?

There is some history in Java using @ initially as a marker in Javadoc comments and later in Java 1.5 for annotations, which are similar to Python decorators. The fact that @ was previously unused as a token in Python also means it's clear there is no possibility of such code being parsed by an earlier version of Python, leading to possibly subtle semantic bugs.
It also means that ambiguity of what is a decorator and what isn't is removed. That said, @ is still a fairly arbitrary choice. Some have suggested using | instead.

For syntax options which use a list-like syntax (no matter where it appears) to specify the decorators a few alternatives were proposed: [|...|], *[...]*, and <...>.

Current Implementation, History

Guido asked for a volunteer to implement his preferred syntax, and Mark Russell stepped up and posted a patch to SF. This new syntax was available in 2.4a2:

    @dec2
    @dec1
    def func(arg1, arg2, ...):
        pass

This is equivalent to:

    def func(arg1, arg2, ...):
        pass
    func = dec2(dec1(func))

though without the intermediate creation of a variable named func.

The version implemented in 2.4a2 allowed multiple @decorator clauses on a single line. In 2.4a3, this was tightened up to only allowing one decorator per line.

A previous patch from Michael Hudson which implements the list-after-def syntax is also still kicking around.

After 2.4a2 was released, in response to community reaction, Guido stated that he'd re-examine a community proposal, if the community could come up with a community consensus, a decent proposal, and an implementation. After an amazing number of posts, collecting a vast number of alternatives in the Python wiki, a community consensus emerged (below). Guido subsequently rejected this alternate form, but added:

  In Python 2.4a3 (to be released this Thursday), everything remains as currently in CVS. For 2.4b1, I will consider a change of @ to some other single character, even though I think that @ has the advantage of being the same character used by a similar feature in Java. It's been argued that it's not quite the same, since @ in Java is used for attributes that don't change semantics. But Python's dynamic nature makes that its syntactic elements never mean quite the same thing as similar constructs in other languages, and there is definitely significant overlap. Regarding the impact on 3rd party tools: IPython's author doesn't think there's going to be much impact; Leo's author has said that Leo will survive (although it will cause him and his users some transitional pain). I actually expect that picking a character that's already used elsewhere in Python's syntax might be harder for external tools to adapt to, since parsing will have to be more subtle in that case. But I'm frankly undecided, so there's some wiggle room here. I don't want to consider further syntactic alternatives at this point: the buck has to stop at some point, everyone has had their say, and the show must go on.

Community Consensus

This section documents the rejected J2 syntax, and is included for historical completeness.

The consensus that emerged on comp.lang.python was the proposed J2 syntax (the "J2" was how it was referenced on the PythonDecorators wiki page): the new keyword using prefixing a block of decorators before the def statement. For example:

    using:
        classmethod
        synchronized(lock)
    def func(cls):
        pass

The main arguments for this syntax fall under the "readability counts" doctrine. In brief, they are:

- A suite is better than multiple @lines. The using keyword and block transforms the single-block def statement into a multiple-block compound construct, akin to try/finally and others.
- A keyword is better than punctuation for a new token. A keyword matches the existing use of tokens.
  No new token category is necessary. A keyword distinguishes Python decorators from Java annotations and .Net attributes, which are significantly different beasts.

Robert Brewer wrote a detailed proposal for this form, and Michael Sparks produced a patch.

As noted previously, Guido rejected this form, outlining his problems with it in a message to python-dev and comp.lang.python.

Examples

Much of the discussion on comp.lang.python and the python-dev mailing list focuses on the use of decorators as a cleaner way to use the staticmethod() and classmethod() builtins. This capability is much more powerful than that. This section presents some examples of use.

1. Define a function to be executed at exit. Note that the function isn't actually "wrapped" in the usual sense:

       def onexit(f):
           import atexit
           atexit.register(f)
           return f

       @onexit
       def func():
           ...

   Note that this example is probably not suitable for real usage, but is for example purposes only.

2. Define a class with a singleton instance. Note that once the class disappears enterprising programmers would have to be more creative to create more instances. (From Shane Hathaway on python-dev.):

       def singleton(cls):
           instances = {}
           def getinstance():
               if cls not in instances:
                   instances[cls] = cls()
               return instances[cls]
           return getinstance

       @singleton
       class MyClass:
           ...

3. Add attributes to a function. (Based on an example posted by Anders Munch on python-dev.):

       def attrs(**kwds):
           def decorate(f):
               for k in kwds:
                   setattr(f, k, kwds[k])
               return f
           return decorate

       @attrs(versionadded="2.2",
              author="Guido van Rossum")
       def mymethod(f):
           ...

4. Enforce function argument and return types. Note that this copies the func_name attribute from the old to the new function. func_name was made writable in Python 2.4a3:

       def accepts(*types):
           def check_accepts(f):
               assert len(types) == f.func_code.co_argcount
               def new_f(*args, **kwds):
                   for (a, t) in zip(args, types):
                       assert isinstance(a, t), \
                              "arg %r does not match %s" % (a,t)
                   return f(*args, **kwds)
               new_f.func_name = f.func_name
               return new_f
           return check_accepts

       def returns(rtype):
           def check_returns(f):
               def new_f(*args, **kwds):
                   result = f(*args, **kwds)
                   assert isinstance(result, rtype), \
                          "return value %r does not match %s" % (result,rtype)
                   return result
               new_f.func_name = f.func_name
               return new_f
           return check_returns

       @accepts(int, (int,float))
       @returns((int,float))
       def func(arg1, arg2):
           return arg1 * arg2

5. Declare that a class implements a particular (set of) interface(s). This is from a posting by Bob Ippolito on python-dev based on experience with PyProtocols:

       def provides(*interfaces):
           """
           An actual, working, implementation of provides for
           the current implementation of PyProtocols. Not
           particularly important for the PEP text.
           """
           def provides(typ):
               declareImplementation(typ, instancesProvide=interfaces)
               return typ
           return provides

       class IBar(Interface):
           """Declare something about IBar here"""

       @provides(IBar)
       class Foo(object):
           """Implement something here..."""

Of course, all these examples are possible today, though without syntactic support.

(No longer) Open Issues

1. It's not yet certain that class decorators will be incorporated into the language at a future point. Guido expressed skepticism about the concept, but various people have made some strong arguments (search for PEP 318 -- posting draft) on their behalf in python-dev.
Guido expressed skepticism about the\n concept, but various people have made some strong arguments (search\n for PEP 318 -- posting draft) on their behalf in python-dev. It's\n exceedingly unlikely that class decorators will be in Python 2.4.\n\n https://mail.python.org/pipermail/python-dev/2004-March/thread.html\n\n PEP 3129 proposes to add class decorators as of Python 2.6.\n\n2. The choice of the @ character will be re-examined before Python\n 2.4b1.\n\n In the end, the @ character was kept.\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.822822"},"created":{"kind":"timestamp","value":"2003-06-05T00:00:00","string":"2003-06-05T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0318/\",\n \"authors\": [\n \"Anthony Baxter\",\n \"Jim J. Jewett\",\n \"Kevin D. Smith\",\n \"Skip Montanaro\"\n ],\n \"pep_number\": \"0318\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":584,"cells":{"id":{"kind":"string","value":"0730"},"text":{"kind":"string","value":"PEP: 730 Title: Adding iOS as a supported platform Author: Russell\nKeith-Magee Sponsor: Ned Deily\n Discussions-To:\nhttps://discuss.python.org/t/pep730-adding-ios-as-a-supported-platform/35854\nStatus: Final Type: Standards Track Content-Type: text/x-rst Created:\n09-Oct-2023 Python-Version: 3.13 Resolution:\nhttps://discuss.python.org/t/pep-730-adding-ios-as-a-supported-platform/35854/66\n\npython:using-ios\n\nAbstract\n\nThis PEP proposes adding iOS as a supported platform in CPython. The\ninitial goal is to achieve Tier 3 support for Python 3.13. This PEP\ndescribes the technical aspects of the changes that are required to\nsupport iOS. It also describes the project management concerns related\nto adoption of iOS as a Tier 3 platform.\n\nMotivation\n\nOver the last 15 years, mobile platforms have become increasingly\nimportant parts of the computing landscape. iOS is one of two operating\nsystems that control the vast majority of these devices. However, there\nis no official support for iOS in CPython.\n\nThe BeeWare Project and Kivy have both supported iOS for almost 10\nyears. This support has been able to generate applications that have\nbeen accepted for publication in the iOS App Store. This demonstrates\nthe technical feasibility of iOS support.\n\nIt is important for the future of Python as a language that it is able\nto be used on any hardware or OS that has widespread adoption. If Python\ncannot be used a on a platform that has widespread use, adoption of the\nlanguage will be impacted as potential users will adopt other languages\nthat do provide support for these platforms.\n\nRationale\n\nDevelopment landscape\n\niOS provides a single API, but 2 distinct ABIs - iphoneos (physical\ndevices), and iphonesimulator. Each of these ABIs can be provided on\nmultiple CPU architectures. At time of writing, Apple officially\nsupports arm64 on the device ABI, and arm64 and x86_64 are supported on\nthe simulator ABI.\n\nAs with macOS, iOS supports the creation of \"fat\" binaries that contains\nmultiple CPU architectures. However, fat binaries cannot span ABIs. 
As with macOS, compiling a binary module that is accessible from a statically-linked build of Python requires the use of the --undefined dynamic_lookup option to avoid linking libpython3.x into every binary module. However, on iOS, this compiler flag raises a deprecation warning when it is used. A warning from this flag has been observed on macOS as well - however, responses from Apple staff suggest that they do not intend to break the CPython ecosystem by removing this option. As Python does not currently have a notable presence on iOS, it is difficult to judge whether iOS usage of this flag would fall under the same umbrella.

Console and interactive usage

Distribution of a traditional CPython REPL or interactive "python.exe" should not be considered a goal of this work.

Mobile devices (including iOS) do not provide a TTY-style console. They do not provide stdin, stdout or stderr. iOS provides a system log, and it is possible to install a redirection so that all stdout and stderr content is redirected to the system log; but there is no analog for stdin.

In addition, iOS places restrictions on downloading additional code at runtime (as this behavior would be functionally indistinguishable from trying to work around App Store review). As a result, a traditional "create a virtual environment and pip install" development experience will not be viable on iOS.

It is possible to build a native iOS application that provides a REPL interface. This would be closer to an IDLE-style user experience; however, Tkinter cannot be used on iOS, so any app would require a ground-up rewrite. The iOS app store already contains several examples of apps in this category (e.g., Pythonista and Pyto). The focus of this work would be to provide an embedded distribution that IDE-style native interfaces could utilize, not a user-facing "app" interface to iOS on Python.

Specification

Platform identification

sys

sys.platform will identify as "ios" on both simulator and physical devices.

sys.implementation._multiarch will describe the ABI and CPU architecture:

- "arm64-iphoneos" for ARM64 devices
- "arm64-iphonesimulator" for ARM64 simulators
- "x86_64-iphonesimulator" for x86_64 simulators

platform

platform will be modified to support returning iOS-specific details. Most of the values returned by the platform module will match those returned by os.uname(), with the exception of:

- platform.system() - "iOS" or "iPadOS" (depending on the hardware in use), instead of "Darwin"
- platform.release() - the iOS version number, as a string (e.g., "16.6.1"), instead of the Darwin kernel version.

In addition, a platform.ios_ver() method will be added. This mirrors platform.mac_ver(), which can be used to provide macOS version information. ios_ver() will return a namedtuple that contains the following:

- system - the OS name (iOS or iPadOS, depending on hardware)
- release - the iOS version, as a string (e.g., "16.6.1").
- model - the model identifier of the device, as a string (e.g., "iPhone13,2"). On simulators, this will return "iPhone" or "iPad", depending on the simulator device.
- is_simulator - a boolean indicating if the device is a simulator.
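Taken together, an interactive session on an ARM64 device would look something like this (the version and model values are illustrative):

    >>> import sys, platform
    >>> sys.platform
    'ios'
    >>> sys.implementation._multiarch
    'arm64-iphoneos'
    >>> platform.system()
    'iOS'
    >>> platform.release()
    '16.6.1'
    >>> platform.ios_ver()
    IOSVersionInfo(system='iOS', release='16.6.1', model='iPhone13,2', is_simulator=False)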
As Python does not currently have a notable presence on iOS, it\nis difficult to judge whether iOS usage of this flag would fall under\nthe same umbrella.\n\nConsole and interactive usage\n\nDistribution of a traditional CPython REPL or interactive \"python.exe\"\nshould not be considered a goal of this work.\n\nMobile devices (including iOS) do not provide a TTY-style console. They\ndo not provide stdin, stdout or stderr. iOS provides a system log, and\nit is possible to install a redirection so that all stdout and stderr\ncontent is redirected to the system log; but there is no analog for\nstdin.\n\nIn addition, iOS places restrictions on downloading additional code at\nruntime (as this behavior would be functionally indistinguishable from\ntrying to work around App Store review). As a result, a traditional\n\"create a virtual environment and pip install\" development experience\nwill not be viable on iOS.\n\nIt is possible to build an native iOS application that provides a REPL\ninterface. This would be closer to an IDLE-style user experience;\nhowever, Tkinter cannot be used on iOS, so any app would require a\nground-up rewrite. The iOS app store already contains several examples\nof apps in this category (e.g., Pythonista and Pyto). The focus of this\nwork would be to provide an embedded distribution that IDE-style native\ninterfaces could utilize, not a user-facing \"app\" interface to iOS on\nPython.\n\nSpecification\n\nPlatform identification\n\nsys\n\nsys.platform will identify as \"ios\" on both simulator and physical\ndevices.\n\nsys.implementation._multiarch will describe the ABI and CPU\narchitecture:\n\n- \"arm64-iphoneos\" for ARM64 devices\n- \"arm64-iphonesimulator\" for ARM64 simulators\n- \"x86_64-iphonesimulator\" for x86_64 simulators\n\nplatform\n\nplatform will be modified to support returning iOS-specific details.\nMost of the values returned by the platform module will match those\nreturned by os.uname(), with the exception of:\n\n- platform.system() - \"iOS\" or iPadOS (depending on the hardware in\n use), instead of \"Darwin\"\n- platform.release() - the iOS version number, as a string (e.g.,\n \"16.6.1\"), instead of the Darwin kernel version.\n\nIn addition, a platform.ios_ver() method will be added. This mirrors\nplatform.mac_ver(), which can be used to provide macOS version\ninformation. ios_ver() will return a namedtuple that contains the\nfollowing:\n\n- system - the OS name (iOS or iPadOS, depending on hardware)\n- release - the iOS version, as a string (e.g., \"16.6.1\").\n- model - the model identifier of the device, as a string (e.g.,\n \"iPhone13,2\"). On simulators, this will return \"iPhone\" or \"iPad\",\n depending on the simulator device.\n- is_simulator - a boolean indicating if the device is a simulator.\n\nos\n\nos.uname() will return the raw result of a POSIX uname() call. This will\nresult in the following values:\n\n- sysname - \"Darwin\"\n- release - The Darwin kernel version (e.g., \"22.6.0\")\n\nThis approach treats the os module as a \"raw\" interface to system APIs,\nand platform as a higher-level API providing more generally useful\nvalues.\n\nsysconfig\n\nThe sysconfig module will use the minimum iOS version as part of\nsysconfig.get_platform() (e.g., \"ios-12.0-arm64-iphoneos\"). The\nsysconfigdata_name and Config makefile will follow the same patterns as\nexisting platforms (using sys.platform, sys.implementation._multiarch\netc.) 
to construct identifiers.\n\nSubprocess support\n\niOS will leverage the pattern for disabling subprocesses established by\nWASI/Emscripten. The subprocess module will raise an exception if an\nattempt is made to start a subprocess, and os.fork and os.spawn calls\nwill raise an OSError.\n\nDynamic module loading\n\nTo accommodate iOS dynamic loading, the importlib bootstrap will be\nextended to add a metapath finder that can convert a request for a\nPython binary module into a Framework location. This finder will only be\ninstalled if sys.platform == \"ios\".\n\nThis finder will convert a Python module name (e.g., foo.bar._whiz) into\na unique Framework name by using the full module name as the framework\nname (i.e., foo.bar._whiz.framework). A framework is a directory; the\nfinder will look for a binary named foo.bar._whiz in that directory.\n\nCompilation\n\nThe only binary format that will be supported is a dynamically-linkable\nlibpython3.x.dylib, packaged in an iOS-compatible framework format.\nWhile the --undefined dynamic_lookup compiler option currently works,\nthe long-term viability of the option cannot be guaranteed. Rather than\nrely on a compiler flag with an uncertain future, binary modules on iOS\nwill be linked with libpython3.x.dylib. This means iOS binary modules\nwill not be loadable by an executable that has been statically linked\nagainst libpython3.x.a. Therefore, a static libpython3.x.a iOS library\nwill not be supported. This is the same pattern used by CPython on\nWindows.\n\nBuilding CPython for iOS requires the use of the cross-platform tooling\nin CPython's configure build system. A single\nconfigure/make/make install pass will produce a Python.framework\nartefact that can be used on a single ABI and architecture.\n\nAdditional tooling will be required to merge the Python.framework builds\nfor multiple architectures into a single \"fat\" library. Tooling will\nalso be required to merge multiple ABIs into the XCframework format that\nApple uses to distribute multiple frameworks for different ABIs in a\nsingle bundle.\n\nAn Xcode project will be provided for the purpose of running the CPython\ntest suite. Tooling will be provided to automate the process of\ncompiling the test suite binary, start the simulator, install the test\nsuite, and execute it.\n\nDistribution\n\nAdding iOS as a Tier 3 platform only requires adding support for\ncompiling an iOS-compatible build from an unpatched CPython code\ncheckout. It does not require production of officially distributed iOS\nartefacts for use by end-users.\n\nIf/when iOS is updated to Tier 2 or 1 support, the tooling used to\ngenerate an XCframework package could be used to produce an iOS\ndistribution artefact. This could then be distributed as an \"embedded\ndistribution\" analogous to the Windows embedded distribution, or as a\nCocoaPod or Swift package that could be added to an Xcode project.\n\nCI resources\n\nAnaconda has offered to provide physical hardware to run iOS buildbots.\n\nGitHub Actions is able to host iOS simulators on their macOS machines,\nand the iOS simulator can be controlled by scripting environments. The\nfree tier currently only provides x86_64 macOS machines; however ARM64\nrunners recently became available on paid plans __.\nHowever, in order to avoid exhausting macOS runner resources, a GitHub\nActions run for iOS will not be added as part of the standard CI\nconfiguration.\n\nPackaging\n\niOS will not provide a \"universal\" wheel format. 
Instead, wheels will be\nprovided for each ABI-arch combination.\n\niOS wheels will use tags:\n\n- ios_12_0_arm64_iphoneos\n- ios_12_0_arm64_iphonesimulator\n- ios_12_0_x86_64_iphonesimulator\n\nIn these tags, \"12.0\" is the minimum supported iOS version. As with\nmacOS, the tag will incorporate the minimum iOS version that is selected\nwhen the wheel is compiled; a wheel compiled with a minimum iOS version\nof 15.0 would use the ios_15_0_* tags. At time of writing, iOS 12.0\nexposes most significant iOS features, while reaching near 100% of\ndevices; this will be used as a floor for iOS version matching.\n\nThese wheels can include binary modules in-situ (i.e., co-located with\nthe Python source, in the same way as wheels for a desktop platform);\nhowever, they will need to be post-processed as binary modules need to\nbe moved into the \"Frameworks\" location for distribution. This can be\nautomated with an Xcode build step.\n\nPEP 11 Update\n\nPEP 11 will be updated to include two of the iOS ABIs:\n\n- arm64-apple-ios\n- arm64-apple-ios-simulator\n\nNed Deily will serve as the initial core team contact for these ABIs.\n\nThe x86_64-apple-ios-simulator target will be supported on a best-effort\nbasis, but will not be targeted for tier 3 support. This is due to the\nimpending deprecation of x86_64 as a simulation platform, combined with\nthe difficulty of commissioning x86_64 macOS hardware at this time.\n\nBackwards Compatibility\n\nAdding a new platform does not introduce any backwards compatibility\nconcerns to CPython itself.\n\nThere may be some backwards compatibility implications on the projects\nthat have historically provided CPython support (i.e., BeeWare and Kivy)\nif the final form of any CPython patches don't align with the patches\nthey have historically used.\n\nAlthough not strictly a backwards compatibility issue, there is a\nplatform adoption consideration. Although CPython itself may support\niOS, if it is unclear how to produce iOS-compatible wheels, and\nprominent libraries like cryptography, Pillow, and NumPy don't provide\niOS wheels, the ability of the community to adopt Python on iOS will be\nlimited. Therefore, it will be necessary to clearly document how\nprojects can add iOS builds to their CI and release tooling. Adding iOS\nsupport to tools like crossenv and cibuildwheel may be one way to\nachieve this.\n\nSecurity Implications\n\nAdding iOS as a new platform does not add any security implications.\n\nHow to Teach This\n\nThe education needs related to this PEP mostly relate to how end-users\ncan add iOS support to their own Xcode projects. This can be\naccomplished with documentation and tutorials on that process. The need\nfor this documentation will increase if/when support raises from Tier 3\nto Tier 2 or 1; however, this transition should also be accompanied with\nsimplified deployment artefacts (such as a Cocoapod or Swift package)\nthat are integrated with Xcode development.\n\nReference Implementation\n\nThe BeeWare Python-Apple-support repository contains a reference patch\nand build tooling to compile a distributable artefact.\n\nBriefcase provides a reference implementation of code to execute test\nsuites on iOS simulators. The Toga Testbed is an example of a test suite\nthat is executed on the iOS simulator using GitHub Actions.\n\nRejected Ideas\n\nSimulator identification\n\nEarlier versions of this PEP suggested the inclusion of\nsys.implementation._simulator attribute to identify when code is running\non device, or on a simulator. 
This was rejected due to the use of a\nprotected name for a public API, plus the pollution of the sys namespace\nwith an iOS-specific detail.\n\nAnother proposal during discussion was to include a generic\nplatform.is_emulator() API that could be implemented by any platform -\nfor example to differentiate running on x86_64 code on ARM64 hardware,\nor when running in QEMU or other virtualization methods. This was\nrejected on the basis that it wasn't clear what a consistent\ninterpretation of \"emulator\" would be, or how an emulator would be\ndetected outside of the iOS case.\n\nThe decision was made to keep this detail iOS-specific, and include it\non the platform.ios_ver() API.\n\nGNU compiler triples\n\nautoconf requires the use of a GNU compiler triple to identify build and\nhost platforms. However, the autoconf toolchain doesn't provide native\nsupport for iOS simulators, so we are left with the task of working out\nhow to squeeze iOS hardware into GNU's naming regimen.\n\nThis can be done (with some patching of config.sub), but it leads to 2\nmajor sources of naming inconsistency:\n\n- arm64 vs aarch64 as an identifier of 64-bit ARM hardware; and\n- What identifier is used to represent simulators.\n\nApple's own tools use arm64 as the architecture, but appear to be\ntolerant of aarch64 in some cases. The device platform is identified as\niphoneos and iphonesimulator.\n\nRust toolchains uses aarch64 as the architecture, and use\naarch64-apple-ios and aarch64-apple-ios-sim to identify the device\nplatform; however, they use x86_64-apple-ios to represent iOS simulators\non x86_64 hardware.\n\nThe decision was made to use arm64-apple-ios and\narm64-apple-ios-simulator because:\n\n1. The autoconf toolchain already contains support for ios as a\n platform in config.sub; it's only the simulator that doesn't have a\n representation.\n2. The third part of the host triple is used as sys.platform.\n3. When Apple's own tools reference CPU architecture, they use arm64,\n and the GNU tooling usage of the architecture isn't visible outside\n the build process.\n4. When Apple's own tools reference simulator status independent of the\n OS (e.g., in the naming of Swift submodules), they use a -simulator\n suffix.\n5. While some iOS packages will use Rust, all iOS packages will use\n Apple's tooling.\n\nThe initially accepted version of this document used the aarch64 form as\nthe PEP 11 identifier; this was corrected during finalization.\n\n\"Universal\" wheel format\n\nmacOS currently supports 2 CPU architectures. To aid the end-user\ndevelopment experience, Python defines a \"universal2\" wheel format that\nincorporates both x86_64 and ARM64 binaries.\n\nIt would be conceptually possible to offer an analogous \"universal\" iOS\nwheel format. However, this PEP does not use this approach, for 2\nreasons.\n\nFirstly, the experience on macOS, especially in the numerical Python\necosystem, has been that universal wheels can be exceedingly difficult\nto accommodate. While native macOS libraries maintain strong\nmulti-platform support, and Python itself has been updated, the vast\nmajority of upstream non-Python libraries do not provide\nmulti-architecture build support. As a result, compiling universal\nwheels inevitably requires multiple compilation passes, and complex\ndecisions over how to distribute header files for different\narchitectures. 
As a result of this complexity, many popular projects\n(including NumPy and Pillow) do not provide universal wheels at all,\ninstead providing separate ARM64 and x86_64 wheels.\n\nSecondly, historical experience is that iOS would require a much more\nfluid \"universal\" definition. In the last 10 years, there have been at\nleast 5 different possible interpretations of \"universal\" that would\napply to iOS, including various combinations of armv6, armv7, armv7s,\narm64, x86 and x86_64 architectures, on device and simulator. If defined\nright now, \"universal-iOS\" would likely include x86_64 and arm64 on\nsimulator, and arm64 on device; however, the pending deprecation of\nx86_64 hardware would add another interpretation; and there may be a\nneed to add arm64e as a new device architecture in the future.\nSpecifying iOS wheels as single-platform-only means the Python core team\ncan avoid an ongoing standardization discussion about the updated\n\"universal\" formats.\n\nIt also means wheel publishers are able to make per-project decisions\nover which platforms are feasible to support. For example, a project may\nchoose to drop x86_64 support, or adopt a new architecture earlier than\nother parts of the Python ecosystem. Using platform-specific wheels\nmeans this decision can be left to individual package publishers.\n\nThis decision comes at cost of making deployment more complicated.\nHowever, deployment on iOS is already a complicated process that is best\naided by tools. At present, no binary merging is required, as there is\nonly one on-device architecture, and simulator binaries are not\nconsidered to be distributable artefacts, so only one architecture is\nneeded to build an app for a simulator.\n\nSupporting static builds\n\nWhile the long-term viability of the --undefined dynamic_lookup option\ncannot be guaranteed, the option does exist, and it works. One option\nwould be to ignore the deprecation warning, and hope that Apple either\nreverses the deprecation decision, or never finalizes the deprecation.\n\nGiven that Apple's decision-making process is entirely opaque, this\nwould be, at best, a risky option. When combined with the fact that the\nbroader iOS development ecosystem encourages the use of frameworks,\nthere are no legacy uses of a static library to consider, and the only\nbenefit to a statically-linked iOS libpython3.x.a is a very slightly\nreduced app startup time, omitting support for static builds of\nlibpython3.x seems a reasonable compromise.\n\nIt is worth noting that there has been some discussion on an alternate\napproach to linking on macOS that would remove the need for the\n--undefined dynamic_lookup option, although discussion on this approach\nappears to have stalled due to complications in implementation. If those\ncomplications were to be overcome, it is highly likely that the same\napproach could be used on iOS, which would make a statically linked\nlibpython3.x.a plausible.\n\nThe decision to link binary modules against libpython3.x.dylib would\ncomplicate the introduction of static libpython3.x.a builds in the\nfuture, as the process of moving to a different binary module linking\napproach would require a clear way to differentate \"dynamically-linked\"\niOS binary modules from \"static-compatible\" iOS binary modules. 
However,\ngiven the lack of tangible benefits of a static libpython3.x.a, it seems\nunlikely that there will be any requirement to make this change.\n\nInteractive/REPL mode\n\nA traditional python.exe command line experience isn't really viable on\nmobile devices, because mobile devices don't have a command line. iOS\napps don't have a stdout, stderr or stdin; and while you can redirect\nstdout and stderr to the system log, there's no source for stdin that\nexists that doesn't also involve building a very specific user-facing\napp that would be closer to an IDLE-style IDE experience. Therefore, the\ndecision was made to only focus on \"embedded mode\" as a target for\nmobile distribution.\n\nx86_64 simulator support\n\nApple no longer sells x86_64 hardware. As a result, commissioning an\nx86_64 buildbot can be difficult. It is possible to run macOS binaries\nin x86_64 compatibility mode on ARM64 hardware; however, this isn't\nideal for testing purposes. Therefore, the x86_64 Simulator\n(x86_64-apple-ios-simulator) will not be added as a Tier 3 target. It is\nhighly likely that iOS support will work on the x86_64 without any\nmodification; this only impacts on the official Tier 3 status.\n\nOn-device testing\n\nCI testing on simulators can be accommodated reasonably easily.\nOn-device testing is much harder, as availability of device farms that\ncould be configured to provide Buildbots or Github Actions runners is\nlimited.\n\nHowever, on device testing may not be necessary. As a data point -\nApple's Xcode Cloud solution doesn't provide on-device testing. They\nrely on the fact that the API is consistent between device and\nsimulator, and ARM64 simulator testing is sufficient to reveal\nCPU-specific issues.\n\nOrdering of _multiarch tags\n\nThe initially accepted version of this document used -\nordering (e.g., iphoneos-arm64) for sys.implementation._multiarch (and\nrelated values, such as wheel tags). The final merged version uses the\n- ordering (e.g., arm64-iphoneos). This is for\nconsistency with compiler triples on other platforms (especially Linux),\nwhich specify the architecture before the operating system.\n\nValues returned by platform.ios_ver()\n\nThe initially accepted version of this document didn't include a system\nidentifier. This was added during the implementation phase to support\nthe implementation of platform.system().\n\nThe initially accepted version of this document also described that\nmin_release would be returned in the ios_ver() result. The final version\nomits the min_release value, as it is not significant at runtime; it\nonly impacts on binary compatibility. 
The minimum version is included in\nthe value returned by sysconfig.get_platform(), as this is used to\ndefine wheel (and other binary) compatibility.\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.855438"},"created":{"kind":"timestamp","value":"2023-10-09T00:00:00","string":"2023-10-09T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0730/\",\n \"authors\": [\n \"Russell Keith-Magee\"\n ],\n \"pep_number\": \"0730\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":585,"cells":{"id":{"kind":"string","value":"0469"},"text":{"kind":"string","value":"PEP: 469 Title: Migration of dict iteration code to Python 3 Version:\n$Revision$ Last-Modified: $Date$ Author: Alyssa Coghlan\n Status: Withdrawn Type: Standards Track\nContent-Type: text/x-rst Created: 18-Apr-2014 Python-Version: 3.5\nPost-History: 18-Apr-2014, 21-Apr-2014\n\nAbstract\n\nFor Python 3, PEP 3106 changed the design of the dict builtin and the\nmapping API in general to replace the separate list based and iterator\nbased APIs in Python 2 with a merged, memory efficient set and multiset\nview based API. This new style of dict iteration was also added to the\nPython 2.7 dict type as a new set of iteration methods.\n\nThis means that there are now 3 different kinds of dict iteration that\nmay need to be migrated to Python 3 when an application makes the\ntransition:\n\n- Lists as mutable snapshots: d.items() -> list(d.items())\n- Iterator objects: d.iteritems() -> iter(d.items())\n- Set based dynamic views: d.viewitems() -> d.items()\n\nThere is currently no widely agreed best practice on how to reliably\nconvert all Python 2 dict iteration code to the common subset of Python\n2 and 3, especially when test coverage of the ported code is limited.\nThis PEP reviews the various ways the Python 2 iteration APIs may be\naccessed, and looks at the available options for migrating that code to\nPython 3 by way of the common subset of Python 2.6+ and Python 3.0+.\n\nThe PEP also considers the question of whether or not there are any\nadditions that may be worth making to Python 3.5 that may ease the\ntransition process for application code that doesn't need to worry about\nsupporting earlier versions when eventually making the leap to Python 3.\n\nPEP Withdrawal\n\nIn writing the second draft of this PEP, I came to the conclusion that\nthe readability of hybrid Python 2/3 mapping code can actually be best\nenhanced by better helper functions rather than by making changes to\nPython 3.5+. 
The main value I now see in this PEP is as a clear record\nof the recommended approaches to migrating mapping iteration code from\nPython 2 to Python 3, as well as suggesting ways to keep things readable\nand maintainable when writing hybrid code that supports both versions.\n\nNotably, I recommend that hybrid code avoid calling mapping iteration\nmethods directly, and instead rely on builtin functions where possible,\nand some additional helper functions for cases that would be a simple\ncombination of a builtin and a mapping method in pure Python 3 code, but\nneed to be handled slightly differently to get the exact same semantics\nin Python 2.\n\nStatic code checkers like pylint could potentially be extended with an\noptional warning regarding direct use of the mapping iteration methods\nin a hybrid code base.\n\nMapping iteration models\n\nPython 2.7 provides three different sets of methods to extract the keys,\nvalues and items from a dict instance, accounting for 9 out of the 18\npublic methods of the dict type.\n\nIn Python 3, this has been rationalised to just 3 out of 11 public\nmethods (as the has_key method has also been removed).\n\nLists as mutable snapshots\n\nThis is the oldest of the three styles of dict iteration, and hence the\none implemented by the d.keys(), d.values() and d.items() methods in\nPython 2.\n\nThese methods all return lists that are snapshots of the state of the\nmapping at the time the method was called. This has a few consequences:\n\n- the original object can be mutated freely without affecting\n iteration over the snapshot\n- the snapshot can be modified independently of the original object\n- the snapshot consumes memory proportional to the size of the\n original mapping\n\nThe semantic equivalent of these operations in Python 3 are\nlist(d.keys()), list(d.values()) and list(d.iteritems()).\n\nIterator objects\n\nIn Python 2.2, dict objects gained support for the then-new iterator\nprotocol, allowing direct iteration over the keys stored in the\ndictionary, thus avoiding the need to build a list just to iterate over\nthe dictionary contents one entry at a time. iter(d) provides direct\naccess to the iterator object for the keys.\n\nPython 2 also provides a d.iterkeys() method that is essentially\nsynonymous with iter(d), along with d.itervalues() and d.iteritems()\nmethods.\n\nThese iterators provide live views of the underlying object, and hence\nmay fail if the set of keys in the underlying object is changed during\niteration:\n\n >>> d = dict(a=1)\n >>> for k in d:\n ... del d[k]\n ...\n Traceback (most recent call last):\n File \"\", line 1, in \n RuntimeError: dictionary changed size during iteration\n\nAs iterators, iteration over these objects is also a one-time operation:\nonce the iterator is exhausted, you have to go back to the original\nmapping in order to iterate again.\n\nIn Python 3, direct iteration over mappings works the same way as it\ndoes in Python 2. 
There are no method based equivalents - the semantic\nequivalents of d.itervalues() and d.iteritems() in Python 3 are\niter(d.values()) and iter(d.items()).\n\nThe six and future.utils compatibility modules also both provide\niterkeys(), itervalues() and iteritems() helper functions that provide\nefficient iterator semantics in both Python 2 and 3.\n\nSet based dynamic views\n\nThe model that is provided in Python 3 as a method based API is that of\nset based dynamic views (technically multisets in the case of the\nvalues() view).\n\nIn Python 3, the objects returned by d.keys(), d.values() and d. items()\nprovide a live view of the current state of the underlying object,\nrather than taking a full snapshot of the current state as they did in\nPython 2. This change is safe in many circumstances, but does mean that,\nas with the direct iteration API, it is necessary to avoid adding or\nremoving keys during iteration, in order to avoid encountering the\nfollowing error:\n\n >>> d = dict(a=1)\n >>> for k, v in d.items():\n ... del d[k]\n ...\n Traceback (most recent call last):\n File \"\", line 1, in \n RuntimeError: dictionary changed size during iteration\n\nUnlike the iteration API, these objects are iterables, rather than\niterators: you can iterate over them multiple times, and each time they\nwill iterate over the entire underlying mapping.\n\nThese semantics are also available in Python 2.7 as the d.viewkeys(),\nd.viewvalues() and d.viewitems() methods.\n\nThe future.utils compatibility module also provides viewkeys(),\nviewvalues() and viewitems() helper functions when running on Python 2.7\nor Python 3.x.\n\nMigrating directly to Python 3\n\nThe 2to3 migration tool handles direct migrations to Python 3 in\naccordance with the semantic equivalents described above:\n\n- d.keys() -> list(d.keys())\n- d.values() -> list(d.values())\n- d.items() -> list(d.items())\n- d.iterkeys() -> iter(d.keys())\n- d.itervalues() -> iter(d.values())\n- d.iteritems() -> iter(d.items())\n- d.viewkeys() -> d.keys()\n- d.viewvalues() -> d.values()\n- d.viewitems() -> d.items()\n\nRather than 9 distinct mapping methods for iteration, there are now only\nthe 3 view methods, which combine in straightforward ways with the two\nrelevant builtin functions to cover all of the behaviours that are\navailable as dict methods in Python 2.7.\n\nNote that in many cases d.keys() can be replaced by just d, but the 2to3\nmigration tool doesn't attempt that replacement.\n\nThe 2to3 migration tool also does not provide any automatic assistance\nfor migrating references to these objects as bound or unbound methods -\nit only automates conversions where the API is called immediately.\n\nMigrating to the common subset of Python 2 and 3\n\nWhen migrating to the common subset of Python 2 and 3, the above\ntransformations are not generally appropriate, as they all either result\nin the creation of a redundant list in Python 2, have unexpectedly\ndifferent semantics in at least some cases, or both.\n\nSince most code running in the common subset of Python 2 and 3 supports\nat least as far back as Python 2.6, the currently recommended approach\nto conversion of mapping iteration operation depends on two helper\nfunctions for efficient iteration over mapping values and mapping item\ntuples:\n\n- d.keys() -> list(d)\n- d.values() -> list(itervalues(d))\n- d.items() -> list(iteritems(d))\n- d.iterkeys() -> iter(d)\n- d.itervalues() -> itervalues(d)\n- d.iteritems() -> iteritems(d)\n\nBoth six and future.utils provide appropriate 
definitions of\nitervalues() and iteritems() (along with essentially redundant\ndefinitions of iterkeys()). Creating your own definitions of these\nfunctions in a custom compatibility module is also relatively\nstraightforward:\n\n try:\n dict.iteritems\n except AttributeError:\n # Python 3\n def itervalues(d):\n return iter(d.values())\n def iteritems(d):\n return iter(d.items())\n else:\n # Python 2\n def itervalues(d):\n return d.itervalues()\n def iteritems(d):\n return d.iteritems()\n\nThe greatest loss of readability currently arises when converting code\nthat actually needs the list based snapshots that were the default in\nPython 2. This readability loss could likely be mitigated by also\nproviding listvalues and listitems helper functions, allowing the\naffected conversions to be simplified to:\n\n- d.values() -> listvalues(d)\n- d.items() -> listitems(d)\n\nThe corresponding compatibility function definitions are as\nstraightforward as their iterator counterparts:\n\n try:\n dict.iteritems\n except AttributeError:\n # Python 3\n def listvalues(d):\n return list(d.values())\n def listitems(d):\n return list(d.items())\n else:\n # Python 2\n def listvalues(d):\n return d.values()\n def listitems(d):\n return d.items()\n\nWith that expanded set of compatibility functions, Python 2 code would\nthen be converted to \"idiomatic\" hybrid 2/3 code as:\n\n- d.keys() -> list(d)\n- d.values() -> listvalues(d)\n- d.items() -> listitems(d)\n- d.iterkeys() -> iter(d)\n- d.itervalues() -> itervalues(d)\n- d.iteritems() -> iteritems(d)\n\nThis compares well for readability with the idiomatic pure Python 3 code\nthat uses the mapping methods and builtins directly:\n\n- d.keys() -> list(d)\n- d.values() -> list(d.values())\n- d.items() -> list(d.items())\n- d.iterkeys() -> iter(d)\n- d.itervalues() -> iter(d.values())\n- d.iteritems() -> iter(d.items())\n\nIt's also notable that when using this approach, hybrid code would never\ninvoke the mapping methods directly: it would always invoke either a\nbuiltin or helper function instead, in order to ensure the exact same\nsemantics on both Python 2 and 3.\n\nMigrating from Python 3 to the common subset with Python 2.7\n\nWhile the majority of migrations are currently from Python 2 either\ndirectly to Python 3 or to the common subset of Python 2 and Python 3,\nthere are also some migrations of newer projects that start in Python 3\nand then later add Python 2 support, either due to user demand, or to\ngain access to Python 2 libraries that are not yet available in Python 3\n(and porting them to Python 3 or creating a Python 3 compatible\nreplacement is not a trivial exercise).\n\nIn these cases, Python 2.7 compatibility is often sufficient, and the\n2.7+ only view based helper functions provided by future.utils allow the\nbare accesses to the Python 3 mapping view methods to be replaced with\ncode that is compatible with both Python 2.7 and Python 3 (note, this is\nthe only migration chart in the PEP that has Python 3 code on the left\nof the conversion):\n\n- d.keys() -> viewkeys(d)\n- d.values() -> viewvalues(d)\n- d.items() -> viewitems(d)\n- list(d.keys()) -> list(d)\n- list(d.values()) -> listvalues(d)\n- list(d.items()) -> listitems(d)\n- iter(d.keys()) -> iter(d)\n- iter(d.values()) -> itervalues(d)\n- iter(d.items()) -> iteritems(d)\n\nAs with migrations from Python 2 to the common subset, note that the\nhybrid code ends up never invoking the mapping methods directly - it\nonly calls builtins and helper methods, with the latter addressing 
the\nsemantic differences between Python 2 and Python 3.\n\nPossible changes to Python 3.5+\n\nThe main proposal put forward to potentially aid migration of existing\nPython 2 code to Python 3 is the restoration of some or all of the\nalternate iteration APIs to the Python 3 mapping API. In particular, the\ninitial draft of this PEP proposed making the following conversions\npossible when migrating to the common subset of Python 2 and Python\n3.5+:\n\n- d.keys() -> list(d)\n- d.values() -> list(d.itervalues())\n- d.items() -> list(d.iteritems())\n- d.iterkeys() -> d.iterkeys()\n- d.itervalues() -> d.itervalues()\n- d.iteritems() -> d.iteritems()\n\nPossible mitigations of the additional language complexity in Python 3\ncreated by restoring these methods included immediately deprecating\nthem, as well as potentially hiding them from the dir() function (or\nperhaps even defining a way to make pydoc aware of function\ndeprecations).\n\nHowever, in the case where the list output is actually desired, the end\nresult of that proposal is actually less readable than an appropriately\ndefined helper function, and the function and method forms of the\niterator versions are pretty much equivalent from a readability\nperspective.\n\nSo unless I've missed something critical, readily available listvalues()\nand listitems() helper functions look like they will improve the\nreadability of hybrid code more than anything we could add back to the\nPython 3.5+ mapping API, and won't have any long-term impact on the\ncomplexity of Python 3 itself.\n\nDiscussion\n\nThe fact that 5 years in to the Python 3 migration we still have users\nconsidering the dict API changes a significant barrier to migration\nsuggests that there are problems with previously recommended approaches.\nThis PEP attempts to explore those issues and tries to isolate those\ncases where previous advice (such as it was) could prove problematic.\n\nMy assessment (largely based on feedback from Twisted devs) is that\nproblems are most likely to arise when attempting to use d.keys(),\nd.values(), and d.items() in hybrid code. 
While superficially it seems\nas though there should be cases where it is safe to ignore the semantic\ndifferences, in practice, the change from \"mutable snapshot\" to \"dynamic\nview\" is significant enough that it is likely better to just force the\nuse of either list or iterator semantics for hybrid code, and leave the\nuse of the view semantics to pure Python 3 code.\n\nThis approach also creates rules that are simple enough and safe enough\nthat it should be possible to automate them in code modernisation\nscripts that target the common subset of Python 2 and Python 3, just as\n2to3 converts them automatically when targeting pure Python 3 code.\n\nAcknowledgements\n\nThanks to the folks at the Twisted sprint table at PyCon for a very\nvigorous discussion of this idea (and several other topics), and\nespecially to Hynek Schlawack for acting as a moderator when things got\na little too heated :)\n\nThanks also to JP Calderone and Itamar Turner-Trauring for their email\nfeedback, as well to the participants in the python-dev review of the\ninitial version of the PEP.\n\nCopyright\n\nThis document has been placed in the public domain."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:33.879655"},"created":{"kind":"timestamp","value":"2014-04-18T00:00:00","string":"2014-04-18T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0469/\",\n \"authors\": [\n \"Alyssa Coghlan\"\n ],\n \"pep_number\": \"0469\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":586,"cells":{"id":{"kind":"string","value":"0249"},"text":{"kind":"string","value":"PEP: 249 Title: Python Database API Specification v2.0 Author:\nMarc-André Lemburg Discussions-To: db-sig@python.org\nStatus: Final Type: Informational Content-Type: text/x-rst Created:\n12-Apr-1999 Post-History: Replaces: 248\n\nIntroduction\n\nThis API has been defined to encourage similarity between the Python\nmodules that are used to access databases. By doing this, we hope to\nachieve a consistency leading to more easily understood modules, code\nthat is generally more portable across databases, and a broader reach of\ndatabase connectivity from Python.\n\nComments and questions about this specification may be directed to the\nSIG for Database Interfacing with Python.\n\nFor more information on database interfacing with Python and available\npackages see the Database Topic Guide.\n\nThis document describes the Python Database API Specification 2.0 and a\nset of common optional extensions. The previous version 1.0 version is\nstill available as reference, in PEP 248. Package writers are encouraged\nto use this version of the specification as basis for new interfaces.\n\nModule Interface\n\nConstructors\n\nAccess to the database is made available through connection objects. The\nmodule must provide the following constructor for these:\n\nconnect( parameters... )\n\n Constructor for creating a connection to the database.\n\n Returns a Connection Object. It takes a number of parameters which\n are database dependent.[1]\n\nGlobals\n\nThese module globals must be defined:\n\napilevel\n\n String constant stating the supported DB API level.\n\n Currently only the strings \"1.0\" and \"2.0\" are allowed. If not\n given, a DB-API 1.0 level interface should be assumed.\n\nthreadsafety\n\n Integer constant stating the level of thread safety the interface\n supports. 
Possible values are:

    +--------------+--------------------------------------------------------+
    | threadsafety | Meaning                                                |
    +==============+========================================================+
    | 0            | Threads may not share the module.                      |
    +--------------+--------------------------------------------------------+
    | 1            | Threads may share the module, but not connections.     |
    +--------------+--------------------------------------------------------+
    | 2            | Threads may share the module and connections.          |
    +--------------+--------------------------------------------------------+
    | 3            | Threads may share the module, connections and cursors. |
    +--------------+--------------------------------------------------------+

    Sharing in the above context means that two threads may use a
    resource without wrapping it using a mutex semaphore to implement
    resource locking. Note that you cannot always make external
    resources thread safe by managing access using a mutex: the
    resource may rely on global variables or other external sources
    that are beyond your control.

paramstyle

    String constant stating the type of parameter marker formatting
    expected by the interface. Possible values are[2]:

    paramstyle   Meaning
    ------------ -----------------------------------------------------------
    qmark        Question mark style, e.g. ...WHERE name=?
    numeric      Numeric, positional style, e.g. ...WHERE name=:1
    named        Named style, e.g. ...WHERE name=:name
    format       ANSI C printf format codes, e.g. ...WHERE name=%s
    pyformat     Python extended format codes, e.g. ...WHERE name=%(name)s

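For illustration only (the cursor object cur and the employees table
are hypothetical, not part of this specification), the same query reads
as follows in each style:

    # qmark
    cur.execute("SELECT * FROM employees WHERE name=?", ("Anna",))
    # numeric
    cur.execute("SELECT * FROM employees WHERE name=:1", ("Anna",))
    # named
    cur.execute("SELECT * FROM employees WHERE name=:name",
                {"name": "Anna"})
    # format
    cur.execute("SELECT * FROM employees WHERE name=%s", ("Anna",))
    # pyformat
    cur.execute("SELECT * FROM employees WHERE name=%(name)s",
                {"name": "Anna"})

A given module announces the style it expects through its paramstyle
constant. In all five cases the parameter values are passed separately
from the SQL text, leaving any necessary quoting to the interface.
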
Exceptions

The module should make all error information available through these
exceptions or subclasses thereof:

Warning

    Exception raised for important warnings like data truncations
    while inserting, etc. It must be a subclass of the Python
    Exception class[3][4].

Error

    Exception that is the base class of all other error exceptions.
    You can use this to catch all errors with one single except
    statement. Warnings are not considered errors and thus should not
    use this class as base. It must be a subclass of the Python
    Exception class[5].

InterfaceError

    Exception raised for errors that are related to the database
    interface rather than the database itself. It must be a subclass
    of Error.

DatabaseError

    Exception raised for errors that are related to the database. It
    must be a subclass of Error.

DataError

    Exception raised for errors that are due to problems with the
    processed data like division by zero, numeric value out of range,
    etc. It must be a subclass of DatabaseError.

OperationalError

    Exception raised for errors that are related to the database's
    operation and not necessarily under the control of the programmer,
    e.g. an unexpected disconnect occurs, the data source name is not
    found, a transaction could not be processed, a memory allocation
    error occurred during processing, etc. It must be a subclass of
    DatabaseError.

IntegrityError

    Exception raised when the relational integrity of the database is
    affected, e.g. a foreign key check fails. It must be a subclass of
    DatabaseError.

InternalError

    Exception raised when the database encounters an internal error,
    e.g. the cursor is not valid anymore, the transaction is out of
    sync, etc. It must be a subclass of DatabaseError.

ProgrammingError

    Exception raised for programming errors, e.g. table not found or
    already exists, syntax error in the SQL statement, wrong number of
    parameters specified, etc. It must be a subclass of DatabaseError.

NotSupportedError

    Exception raised in case a method or database API was used which
    is not supported by the database, e.g. requesting a .rollback() on
    a connection that does not support transactions or has
    transactions turned off. It must be a subclass of DatabaseError.

This is the exception inheritance layout[6][7]:

    Exception
    |__Warning
    |__Error
       |__InterfaceError
       |__DatabaseError
          |__DataError
          |__OperationalError
          |__IntegrityError
          |__InternalError
          |__ProgrammingError
          |__NotSupportedError

Note

The values of these exceptions are not defined. They should give the
user a fairly good idea of what went wrong, though.

Connection Objects

Connection objects should respond to the following methods.

Connection methods

.close()

    Close the connection now (rather than whenever .__del__() is
    called).

    The connection will be unusable from this point forward; an Error
    (or subclass) exception will be raised if any operation is
    attempted with the connection. The same applies to all cursor
    objects trying to use the connection. Note that closing a
    connection without committing the changes first will cause an
    implicit rollback to be performed.

.commit()

    Commit any pending transaction to the database.

    Note that if the database supports an auto-commit feature, this
    must be initially off. An interface method may be provided to turn
    it back on.

    Database modules that do not support transactions should implement
    this method with void functionality.

.rollback()

    This method is optional since not all databases provide
    transaction support.[8]

    In case a database does provide transactions this method causes
    the database to roll back to the start of any pending transaction.
    Closing a connection without committing the changes first will
    cause an implicit rollback to be performed.

.cursor()

    Return a new Cursor Object using the connection.

    If the database does not provide a direct cursor concept, the
    module will have to emulate cursors using other means to the
    extent needed by this specification.[9]

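As an illustrative sketch of the connection lifecycle (not part of the
specification): the module name dbmod, the connect() arguments, and the
employees table are hypothetical, and the module is assumed to use the
qmark paramstyle:

    import dbmod  # hypothetical module conforming to this specification

    con = dbmod.connect(dsn="myhost:MYDB", user="guido", password="234$")
    try:
        cur = con.cursor()
        cur.execute("UPDATE employees SET salary=? WHERE name=?",
                    (5000, "Anna"))
        con.commit()       # changes are lost without an explicit commit
    except dbmod.Error:
        con.rollback()     # undo the pending transaction on errors
        raise
    finally:
        con.close()        # closing without committing first implies
                           # a rollback
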
Cursor Objects

These objects represent a database cursor, which is used to manage the
context of a fetch operation. Cursors created from the same connection
are not isolated, i.e., any changes done to the database by a cursor
are immediately visible to the other cursors. Cursors created from
different connections may or may not be isolated, depending on how the
transaction support is implemented (see also the connection's
.rollback() and .commit() methods).

Cursor Objects should respond to the following methods and attributes.

Cursor attributes

.description

    This read-only attribute is a sequence of 7-item sequences.

    Each of these sequences contains information describing one result
    column:

    - name
    - type_code
    - display_size
    - internal_size
    - precision
    - scale
    - null_ok

    The first two items (name and type_code) are mandatory, the other
    five are optional and are set to None if no meaningful values can
    be provided.

    This attribute will be None for operations that do not return rows
    or if the cursor has not had an operation invoked via the
    .execute*() method yet.

    The type_code can be interpreted by comparing it to the Type
    Objects specified in the section below.

.rowcount

    This read-only attribute specifies the number of rows that the
    last .execute*() produced (for DQL statements like SELECT) or
    affected (for DML statements like UPDATE or INSERT).[10]

    The attribute is -1 in case no .execute*() has been performed on
    the cursor or the rowcount of the last operation cannot be
    determined by the interface.[11]

    Note

    Future versions of the DB API specification could redefine the
    latter case to have the object return None instead of -1.

Cursor methods

.callproc( procname [, parameters ] )

    (This method is optional since not all databases provide stored
    procedures.[12])

    Call a stored database procedure with the given name. The sequence
    of parameters must contain one entry for each argument that the
    procedure expects. The result of the call is returned as a
    modified copy of the input sequence. Input parameters are left
    untouched; output and input/output parameters are replaced with
    possibly new values.

    The procedure may also provide a result set as output. This must
    then be made available through the standard .fetch*() methods.

.close()

    Close the cursor now (rather than whenever __del__ is called).

    The cursor will be unusable from this point forward; an Error (or
    subclass) exception will be raised if any operation is attempted
    with the cursor.

.execute(operation [, parameters])

    Prepare and execute a database operation (query or command).

    Parameters may be provided as sequence or mapping and will be
    bound to variables in the operation. Variables are specified in a
    database-specific notation (see the module's paramstyle attribute
    for details).[13]

    A reference to the operation will be retained by the cursor. If
    the same operation object is passed in again, then the cursor can
    optimize its behavior. This is most effective for algorithms where
    the same operation is used, but different parameters are bound to
    it (many times).

    For maximum efficiency when reusing an operation, it is best to
    use the .setinputsizes() method to specify the parameter types and
    sizes ahead of time. It is legal for a parameter to not match the
    predefined information; the implementation should compensate,
    possibly with a loss of efficiency.

    The parameters may also be specified as a list of tuples to e.g.
    insert multiple rows in a single operation, but this kind of usage
    is deprecated: .executemany() should be used instead.

    Return values are not defined.

.executemany( operation, seq_of_parameters )

    Prepare a database operation (query or command) and then execute
    it against all parameter sequences or mappings found in the
    sequence seq_of_parameters.

    Modules are free to implement this method using multiple calls to
    the .execute() method or by using array operations to have the
    database process the sequence as a whole in one call.

    Use of this method for an operation which produces one or more
    result sets constitutes undefined behavior, and the implementation
    is permitted (but not required) to raise an exception when it
    detects that a result set has been created by an invocation of the
    operation.

    The same comments as for .execute() also apply accordingly to this
    method.

    Return values are not defined.

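By way of illustration (the cursor, the employees table and the qmark
style are assumptions, not part of the specification):

    rows = [("Anna", 4000), ("Vincent", 3000), ("Laura", 5000)]
    cur.executemany(
        "INSERT INTO employees (name, salary) VALUES (?, ?)", rows)
    # logically equivalent to one .execute() per parameter tuple,
    # though a module may use a single array operation instead
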
.fetchone()

    Fetch the next row of a query result set, returning a single
    sequence, or None when no more data is available.[14]

    An Error (or subclass) exception is raised if the previous call to
    .execute*() did not produce any result set or no call was issued
    yet.

.fetchmany([size=cursor.arraysize])

    Fetch the next set of rows of a query result, returning a sequence
    of sequences (e.g. a list of tuples). An empty sequence is
    returned when no more rows are available.

    The number of rows to fetch per call is specified by the
    parameter. If it is not given, the cursor's arraysize determines
    the number of rows to be fetched. The method should try to fetch
    as many rows as indicated by the size parameter. If this is not
    possible due to the specified number of rows not being available,
    fewer rows may be returned.

    An Error (or subclass) exception is raised if the previous call to
    .execute*() did not produce any result set or no call was issued
    yet.

    Note there are performance considerations involved with the size
    parameter. For optimal performance, it is usually best to use the
    .arraysize attribute. If the size parameter is used, then it is
    best for it to retain the same value from one .fetchmany() call to
    the next.

.fetchall()

    Fetch all (remaining) rows of a query result, returning them as a
    sequence of sequences (e.g. a list of tuples). Note that the
    cursor's arraysize attribute can affect the performance of this
    operation.

    An Error (or subclass) exception is raised if the previous call to
    .execute*() did not produce any result set or no call was issued
    yet.

.nextset()

    (This method is optional since not all databases support multiple
    result sets.[15])

    This method will make the cursor skip to the next available set,
    discarding any remaining rows from the current set.

    If there are no more sets, the method returns None. Otherwise, it
    returns a true value and subsequent calls to the .fetch*() methods
    will return rows from the next result set.

    An Error (or subclass) exception is raised if the previous call to
    .execute*() did not produce any result set or no call was issued
    yet.

.arraysize

    This read/write attribute specifies the number of rows to fetch at
    a time with .fetchmany(). It defaults to 1 meaning to fetch a
    single row at a time.

    Implementations must observe this value with respect to the
    .fetchmany() method, but are free to interact with the database a
    single row at a time. It may also be used in the implementation of
    .executemany().

.setinputsizes(sizes)

    This can be used before a call to .execute*() to predefine memory
    areas for the operation's parameters.

    sizes is specified as a sequence -- one item for each input
    parameter. The item should be a Type Object that corresponds to
    the input that will be used, or it should be an integer specifying
    the maximum length of a string parameter. If the item is None,
    then no predefined memory area will be reserved for that column
    (this is useful to avoid predefined areas for large inputs).

    This method would be used before the .execute*() method is
    invoked.

    Implementations are free to have this method do nothing and users
    are free to not use it.

.setoutputsize(size [, column])

    Set a column buffer size for fetches of large columns (e.g. LONGs,
    BLOBs, etc.). The column is specified as an index into the result
    sequence. Not specifying the column will set the default size for
    all large columns in the cursor.

    This method would be used before the .execute*() method is
    invoked.

    Implementations are free to have this method do nothing and users
    are free to not use it.

Type Objects and Constructors

Many databases need to have the input in a particular format for
binding to an operation's input parameters. For example, if an input is
destined for a DATE column, then it must be bound to the database in a
particular string format. Similar problems exist for "Row ID" columns
or large binary items (e.g. blobs or RAW columns). This presents
problems for Python since the parameters to the .execute*() method are
untyped. When the database module sees a Python string object, it
doesn't know if it should be bound as a simple CHAR column, as a raw
BINARY item, or as a DATE.

To overcome this problem, a module must provide the constructors
defined below to create objects that can hold special values. When
passed to the cursor methods, the module can then detect the proper
type of the input parameter and bind it accordingly.

A Cursor Object's description attribute returns information about each
of the result columns of a query. The type_code must compare equal to
one of the Type Objects defined below. Type Objects may be equal to
more than one type code (e.g. DATETIME could be equal to the type codes
for date, time and timestamp columns; see the Implementation Hints
below for details).

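For example (illustrative only; the module dbmod, the cursor and the
employees table are hypothetical), result-set metadata can be inspected
like this after a query:

    cur.execute("SELECT name, salary FROM employees")
    for column in cur.description:       # one 7-item sequence per column
        name, type_code = column[0], column[1]
        if type_code == dbmod.NUMBER:    # equality test, not identity
            print("%s: numeric column" % name)
        elif type_code == dbmod.STRING:
            print("%s: string-based column" % name)
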
The module exports the following constructors and singletons:

Date(year, month, day)

    This function constructs an object holding a date value.

Time(hour, minute, second)

    This function constructs an object holding a time value.

Timestamp(year, month, day, hour, minute, second)

    This function constructs an object holding a time stamp value.

DateFromTicks(ticks)

    This function constructs an object holding a date value from the
    given ticks value (number of seconds since the epoch; see the
    documentation of the standard Python time module for details).

TimeFromTicks(ticks)

    This function constructs an object holding a time value from the
    given ticks value (number of seconds since the epoch; see the
    documentation of the standard Python time module for details).

TimestampFromTicks(ticks)

    This function constructs an object holding a time stamp value from
    the given ticks value (number of seconds since the epoch; see the
    documentation of the standard Python time module for details).

Binary(string)

    This function constructs an object capable of holding a binary
    (long) string value.

STRING type

    This type object is used to describe columns in a database that
    are string-based (e.g. CHAR).

BINARY type

    This type object is used to describe (long) binary columns in a
    database (e.g. LONG, RAW, BLOBs).

NUMBER type

    This type object is used to describe numeric columns in a
    database.

DATETIME type

    This type object is used to describe date/time columns in a
    database.

ROWID type

    This type object is used to describe the "Row ID" column in a
    database.

SQL NULL values are represented by the Python None singleton on input
and output.

Note

Usage of Unix ticks for database interfacing can cause troubles because
of the limited date range they cover.

Implementation Hints for Module Authors

- Date/time objects can be implemented as Python datetime module
  objects (available since Python 2.3, with a C API since 2.4) or
  using the mxDateTime package (available for all Python versions
  since 1.5.2). They both provide all necessary constructors and
  methods at Python and C level.

- Here is a sample implementation of the Unix ticks based constructors
  for date/time delegating work to the generic constructors:

      import time

      def DateFromTicks(ticks):
          # year, month, day
          return Date(*time.localtime(ticks)[:3])

      def TimeFromTicks(ticks):
          # hour, minute, second
          return Time(*time.localtime(ticks)[3:6])

      def TimestampFromTicks(ticks):
          # year, month, day, hour, minute, second
          return Timestamp(*time.localtime(ticks)[:6])

- The preferred object types for Binary objects are the buffer types
  available in standard Python starting with version 1.5.2. Please see
  the Python documentation for details. For information about the C
  interface have a look at Include/bufferobject.h and
  Objects/bufferobject.c in the Python source distribution.

- This Python class allows implementing the above type objects even
  though the description type code field yields multiple values for
  one type object:

      class DBAPITypeObject:
          def __init__(self, *values):
              self.values = values
          def __cmp__(self, other):
              # __cmp__ is only honored by Python 2; Python 3 modules
              # need rich comparison methods instead (see below)
              if other in self.values:
                  return 0
              if other < self.values:
                  return 1
              else:
                  return -1

  The resulting type object compares equal to all values passed to the
  constructor.

- Here is a snippet of Python code that implements the exception
  hierarchy defined above[16]:

      class Error(Exception):
          pass

      class Warning(Exception):
          pass

      class InterfaceError(Error):
          pass

      class DatabaseError(Error):
          pass

      class InternalError(DatabaseError):
          pass

      class OperationalError(DatabaseError):
          pass

      class ProgrammingError(DatabaseError):
          pass

      class IntegrityError(DatabaseError):
          pass

      class DataError(DatabaseError):
          pass

      class NotSupportedError(DatabaseError):
          pass

  In C you can use the PyErr_NewException(fullname, base, NULL) API to
  create the exception objects.

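For Python 3, where __cmp__ is no longer supported, the same
multi-valued type object can be sketched with rich comparison methods
instead (illustrative, not part of the specification):

    class DBAPITypeObject:
        def __init__(self, *values):
            self.values = frozenset(values)
        def __eq__(self, other):
            # compares equal to every type code passed to the
            # constructor, mirroring the __cmp__ version above
            return other in self.values
        def __ne__(self, other):
            return other not in self.values
        def __hash__(self):
            return hash(self.values)
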
Optional DB API Extensions

During the lifetime of DB API 2.0, module authors have often extended
their implementations beyond what is required by this DB API
specification. To enhance compatibility and to provide a clean upgrade
path to possible future versions of the specification, this section
defines a set of common extensions to the core DB API 2.0
specification.

As with all DB API optional features, the database module authors are
free to not implement these additional attributes and methods (using
them will then result in an AttributeError) or to raise a
NotSupportedError in case the availability can only be checked at
run-time.

It has been proposed to make usage of these extensions optionally
visible to the programmer by issuing Python warnings through the Python
warning framework. To make this feature useful, the warning messages
must be standardized in order to be able to mask them. These standard
messages are referred to below as Warning Message.

Cursor.rownumber

    This read-only attribute should provide the current 0-based index
    of the cursor in the result set or None if the index cannot be
    determined.

    The index can be seen as the index of the cursor in a sequence
    (the result set). The next fetch operation will fetch the row
    indexed by .rownumber in that sequence.

    Warning Message: "DB-API extension cursor.rownumber used"

Connection.Error, Connection.ProgrammingError, etc.

    All exception classes defined by the DB API standard should be
    exposed on the Connection objects as attributes (in addition to
    being available at module scope).

    These attributes simplify error handling in multi-connection
    environments.

    Warning Message: "DB-API extension connection.<exception> used"

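A brief illustrative sketch of why this helps: the except clause below
works no matter which module created the connection passed in:

    def run(con, operation, parameters):
        cur = con.cursor()
        try:
            cur.execute(operation, parameters)
        except con.Error:
            # the exception class is reached via the connection, so no
            # reference to the module that created "con" is needed
            con.rollback()
            raise
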
- Here is a snippet of Python code that implements the exception hierarchy defined above[16]:

      class Error(Exception):
          pass

      class Warning(Exception):
          pass

      class InterfaceError(Error):
          pass

      class DatabaseError(Error):
          pass

      class InternalError(DatabaseError):
          pass

      class OperationalError(DatabaseError):
          pass

      class ProgrammingError(DatabaseError):
          pass

      class IntegrityError(DatabaseError):
          pass

      class DataError(DatabaseError):
          pass

      class NotSupportedError(DatabaseError):
          pass

  In C you can use the PyErr_NewException(fullname, base, NULL) API to create the exception objects.

Optional DB API Extensions

During the lifetime of DB API 2.0, module authors have often extended their implementations beyond what is required by this DB API specification. To enhance compatibility and to provide a clean upgrade path to possible future versions of the specification, this section defines a set of common extensions to the core DB API 2.0 specification.

As with all DB API optional features, the database module authors are free to not implement these additional attributes and methods (using them will then result in an AttributeError) or to raise a NotSupportedError in case the availability can only be checked at run-time.

It has been proposed to make usage of these extensions optionally visible to the programmer by issuing Python warnings through the Python warning framework. To make this feature useful, the warning messages must be standardized in order to be able to mask them. These standard messages are referred to below as Warning Message.

Cursor.rownumber

    This read-only attribute should provide the current 0-based index of the cursor in the result set or None if the index cannot be determined.

    The index can be seen as the index of the cursor in a sequence (the result set). The next fetch operation will fetch the row indexed by .rownumber in that sequence.

    Warning Message: "DB-API extension cursor.rownumber used"

Connection.Error, Connection.ProgrammingError, etc.

    All exception classes defined by the DB API standard should be exposed on the Connection objects as attributes (in addition to being available at module scope).

    These attributes simplify error handling in multi-connection environments.

    Warning Message: "DB-API extension connection.<exception> used"

Cursor.connection

    This read-only attribute returns a reference to the Connection object on which the cursor was created.

    The attribute simplifies writing polymorphic code in multi-connection environments.

    Warning Message: "DB-API extension cursor.connection used"

Cursor.scroll(value [, mode='relative' ])

    Scroll the cursor in the result set to a new position according to mode.

    If mode is relative (default), value is taken as an offset to the current position in the result set; if set to absolute, value states an absolute target position.

    An IndexError should be raised in case a scroll operation would leave the result set. In this case, the cursor position is left undefined (ideally it would not move the cursor at all).

    Note

    This method should use native scrollable cursors, if available, or revert to an emulation for forward-only scrollable cursors. The method may raise NotSupportedError to signal that a specific operation is not supported by the database (e.g. backward scrolling).

    Warning Message: "DB-API extension cursor.scroll() used"

Cursor.messages

    This is a Python list object to which the interface appends tuples (exception class, exception value) for all messages which the interface receives from the underlying database for this cursor.

    The list is cleared automatically by all standard cursor method calls (prior to executing the call), except for the .fetch*() calls, to avoid excessive memory usage. It can also be cleared by executing del cursor.messages[:].

    All error and warning messages generated by the database are placed into this list, so checking the list allows the user to verify correct operation of the method calls.

    The aim of this attribute is to eliminate the need for a Warning exception which often causes problems (some warnings really only have an informational character).

    Warning Message: "DB-API extension cursor.messages used"

Connection.messages

    Same as Cursor.messages except that the messages in the list are connection oriented.

    The list is cleared automatically by all standard connection method calls (prior to executing the call) to avoid excessive memory usage, and can also be cleared by executing del connection.messages[:].

    Warning Message: "DB-API extension connection.messages used"

Cursor.next()

    Return the next row from the currently executing SQL statement using the same semantics as .fetchone(). A StopIteration exception is raised when the result set is exhausted for Python versions 2.2 and later. Previous versions don't have the StopIteration exception and so the method should raise an IndexError instead.

    Warning Message: "DB-API extension cursor.next() used"

Cursor.__iter__()

    Return self to make cursors compatible with the iteration protocol[17].

    Warning Message: "DB-API extension cursor.__iter__() used"
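A sketch of how a module might supply both spellings on top of .fetchone() (illustrative only; the mixin name is an assumption, and the class must be combined with a concrete cursor implementation):

    class CursorIterationMixin:
        def __iter__(self):
            # DB-API extension cursor.__iter__(): the cursor is its own iterator.
            return self

        def __next__(self):
            # Python 3 spelling of the .next() extension described above.
            row = self.fetchone()
            if row is None:
                raise StopIteration
            return row

        next = __next__  # Python 2 spelling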
Cursor.lastrowid

    This read-only attribute provides the rowid of the last modified row (most databases return a rowid only when a single INSERT operation is performed). If the operation does not set a rowid or if the database does not support rowids, this attribute should be set to None.

    The semantics of .lastrowid are undefined in case the last executed statement modified more than one row, e.g. when using INSERT with .executemany().

    Warning Message: "DB-API extension cursor.lastrowid used"

Connection.autocommit

    Attribute to query and set the autocommit mode of the connection.

    Return True if the connection is operating in autocommit (non-transactional) mode. Return False if the connection is operating in manual commit (transactional) mode.

    Setting the attribute to True or False adjusts the connection's mode accordingly.

    Changing the setting from True to False (disabling autocommit) will have the database leave autocommit mode and start a new transaction. Changing from False to True (enabling autocommit) has database dependent semantics with respect to how pending transactions are handled.[18]

    Deprecation notice: Even though several database modules implement both the read and write nature of this attribute, setting the autocommit mode by writing to the attribute is deprecated, since this may result in I/O and related exceptions, making it difficult to implement in an async context.[19]

    Warning Message: "DB-API extension connection.autocommit used"

Optional Error Handling Extensions

The core DB API specification only introduces a set of exceptions which can be raised to report errors to the user. In some cases, exceptions may be too disruptive for the flow of a program or even render execution impossible.

For these cases, and in order to simplify error handling when dealing with databases, database module authors may choose to implement user definable error handlers. This section describes a standard way of defining these error handlers.

Connection.errorhandler, Cursor.errorhandler

    Read/write attribute which references an error handler to call in case an error condition is met.

    The handler must be a Python callable taking the following arguments:

        errorhandler(connection, cursor, errorclass, errorvalue)

    where connection is a reference to the connection on which the cursor operates, cursor is a reference to the cursor (or None in case the error does not apply to a cursor), and errorclass is an error class to instantiate using errorvalue as the construction argument.

    The standard error handler should add the error information to the appropriate .messages attribute (Connection.messages or Cursor.messages) and raise the exception defined by the given errorclass and errorvalue parameters.

    If no .errorhandler is set (the attribute is None), the standard error handling scheme as outlined above should be applied.

    Warning Message: "DB-API extension .errorhandler used"

Cursors should inherit the .errorhandler setting from their connection objects at cursor creation time.
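The standard behaviour described above can be written down as a short sketch (illustrative, not normative; it assumes the .messages lists defined earlier in this section):

    def standard_errorhandler(connection, cursor, errorclass, errorvalue):
        # Record the error on the most specific messages list available.
        error = (errorclass, errorvalue)
        if cursor is not None:
            cursor.messages.append(error)
        else:
            connection.messages.append(error)
        # Then raise the exception, as the core specification requires.
        raise errorclass(errorvalue)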
Optional Two-Phase Commit Extensions

Many databases have support for two-phase commit (TPC) which allows managing transactions across multiple database connections and other resources.

If a database backend provides support for two-phase commit and the database module author wishes to expose this support, the following API should be implemented. NotSupportedError should be raised if the database backend's support for two-phase commit can only be checked at run-time.

TPC Transaction IDs

As many databases follow the XA specification, transaction IDs are formed from three components:

- a format ID
- a global transaction ID
- a branch qualifier

For a particular global transaction, the first two components should be the same for all resources. Each resource in the global transaction should be assigned a different branch qualifier.

The various components must satisfy the following criteria:

- format ID: a non-negative 32-bit integer.
- global transaction ID and branch qualifier: byte strings no longer than 64 characters.

Transaction IDs are created with the .xid() Connection method:

.xid(format_id, global_transaction_id, branch_qualifier)

    Returns a transaction ID object suitable for passing to the .tpc_*() methods of this connection.

    If the database connection does not support TPC, a NotSupportedError is raised.

    The type of the object returned by .xid() is not defined, but it must provide sequence behaviour, allowing access to the three components. A conforming database module could choose to represent transaction IDs with tuples rather than a custom object.

TPC Connection Methods

.tpc_begin(xid)

    Begins a TPC transaction with the given transaction ID xid.

    This method should be called outside of a transaction (i.e. nothing may have executed since the last .commit() or .rollback()).

    Furthermore, it is an error to call .commit() or .rollback() within the TPC transaction. A ProgrammingError is raised if the application calls .commit() or .rollback() during an active TPC transaction.

    If the database connection does not support TPC, a NotSupportedError is raised.

.tpc_prepare()

    Performs the first phase of a transaction started with .tpc_begin(). A ProgrammingError should be raised if this method is called outside of a TPC transaction.

    After calling .tpc_prepare(), no statements can be executed until .tpc_commit() or .tpc_rollback() have been called.

.tpc_commit([ xid ])

    When called with no arguments, .tpc_commit() commits a TPC transaction previously prepared with .tpc_prepare().

    If .tpc_commit() is called prior to .tpc_prepare(), a single phase commit is performed. A transaction manager may choose to do this if only a single resource is participating in the global transaction.

    When called with a transaction ID xid, the database commits the given transaction. If an invalid transaction ID is provided, a ProgrammingError will be raised. This form should be called outside of a transaction, and is intended for use in recovery.

    On return, the TPC transaction is ended.

.tpc_rollback([ xid ])

    When called with no arguments, .tpc_rollback() rolls back a TPC transaction. It may be called before or after .tpc_prepare().

    When called with a transaction ID xid, it rolls back the given transaction. If an invalid transaction ID is provided, a ProgrammingError is raised. This form should be called outside of a transaction, and is intended for use in recovery.

    On return, the TPC transaction is ended.

.tpc_recover()

    Returns a list of pending transaction IDs suitable for use with .tpc_commit(xid) or .tpc_rollback(xid).

    If the database does not support transaction recovery, it may return an empty list or raise NotSupportedError.
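Taken together, a two-phase commit across two connections might look like the following sketch. Everything here is illustrative: con1 and con2 stand for open connections from some DB-API module, the xid components are placeholders, and DatabaseError is that module's exception class:

    # Same format ID and global transaction ID, distinct branch qualifiers.
    xid1 = con1.xid(42, 'transfer-001', 'branch-1')
    xid2 = con2.xid(42, 'transfer-001', 'branch-2')

    con1.tpc_begin(xid1)
    con2.tpc_begin(xid2)
    # ... execute statements on cursors of both connections ...

    try:
        con1.tpc_prepare()
        con2.tpc_prepare()
    except DatabaseError:
        con1.tpc_rollback()
        con2.tpc_rollback()
    else:
        con1.tpc_commit()
        con2.tpc_commit()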
Frequently Asked Questions

The database SIG often sees recurring questions about the DB API specification. This section covers some of the issues people sometimes have with the specification.

Question:

How can I construct a dictionary out of the tuples returned by .fetch*()?

Answer:

There are several existing tools available which provide helpers for this task. Most of them use the approach of using the column names defined in the cursor attribute .description as the basis for the keys in the row dictionary.

Note that the reason for not extending the DB API specification to also support dictionary return values for the .fetch*() methods is that this approach has several drawbacks:

- Some databases don't support case-sensitive column names or auto-convert them to all lowercase or all uppercase characters.
- Columns in the result set which are generated by the query (e.g. using SQL functions) don't map to table column names, and databases usually generate names for these columns in a very database specific way.

As a result, accessing the columns through dictionary keys varies between databases and makes writing portable code impossible.
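For illustration, a minimal helper along the lines described above could be (a sketch, not part of the specification, and subject to the portability caveats noted above):

    def rows_as_dicts(cursor):
        # .description holds one 7-item sequence per result column;
        # its first item is the column name.
        names = [column[0] for column in cursor.description]
        return [dict(zip(names, row)) for row in cursor.fetchall()]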
Major Changes from Version 1.0 to Version 2.0

The Python Database API 2.0 introduces a few major changes compared to the 1.0 version. Because some of these changes will cause existing DB API 1.0 based scripts to break, the major version number was adjusted to reflect this change.

These are the most important changes from 1.0 to 2.0:

- The need for a separate dbi module was dropped and the functionality merged into the module interface itself.
- New constructors and Type Objects were added for date/time values, and the RAW Type Object was renamed to BINARY. The resulting set should cover all basic data types commonly found in modern SQL databases.
- New constants (apilevel, threadsafety, paramstyle) and methods (.executemany(), .nextset()) were added to provide better database bindings.
- The semantics of .callproc() needed to call stored procedures are now clearly defined.
- The definition of the .execute() return value changed. Previously, the return value was based on the SQL statement type (which was hard to implement right); it is now undefined, and the more flexible .rowcount attribute should be used instead. Modules are free to return the old style return values, but these are no longer mandated by the specification and should be considered database interface dependent.
- Class based exceptions were incorporated into the specification. Module implementors are free to extend the exception layout defined in this specification by subclassing the defined exception classes.

Post-publishing additions to the DB API 2.0 specification:

- Additional optional DB API extensions to the set of core functionality were specified.

Open Issues

Although the version 2.0 specification clarifies a lot of questions that were left open in the 1.0 version, there are still some remaining issues which should be addressed in future versions:

- Define a useful return value for .nextset() for the case where a new result set is available.
- Integrate the decimal module Decimal object for use as a loss-less monetary and decimal interchange format.

Footnotes

Acknowledgements

Many thanks go to Andrew Kuchling who converted the Python Database API Specification 2.0 from the original HTML format into the PEP format in 2001.

Many thanks to James Henstridge for leading the discussion which led to the standardization of the two-phase commit API extensions in 2008.

Many thanks to Daniele Varrazzo for converting the specification from text PEP format to ReST PEP format, which allows linking to various parts, in 2012.

Copyright

This document has been placed in the Public Domain.

[1] As a guideline the connection constructor parameters should be implemented as keyword parameters for more intuitive use and follow this order of parameters:

    Parameter   Meaning
    ---------   --------------------------------
    dsn         Data source name as string
    user        User name as string (optional)
    password    Password as string (optional)
    host        Hostname (optional)
    database    Database name (optional)

E.g. a connect could look like this:

    connect(dsn='myhost:MYDB', user='guido', password='234$')

Also see [19] regarding planned future additions to this list.

[2] Module implementors should prefer numeric, named or pyformat over the other formats because these offer more clarity and flexibility.

[3] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion.
The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[4] In a future revision of the DB-API, the base class for Warning will likely change to the builtin Warning class. At the time of writing of the DB-API 2.0 in 1999, the warning framework in Python did not yet exist.

[5] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[6] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[7] In a future revision of the DB-API, the base class for Warning will likely change to the builtin Warning class. At the time of writing of the DB-API 2.0 in 1999, the warning framework in Python did not yet exist.

[8] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used.

The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function.

For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.
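From the client's side, this preferred approach turns into a plain capability check (an illustrative sketch using a hypothetical connection object):

    if hasattr(connection, 'rollback'):
        connection.rollback()
    else:
        pass  # this module does not implement transactions at all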
[9] A database interface may choose to support named cursors by allowing a string argument to the method. This feature is not part of the specification, since it complicates semantics of the .fetch*() methods.

[10] The term number of affected rows generally refers to the number of rows deleted, updated or inserted by the last statement run on the database cursor. Most databases will return the total number of rows that were found by the corresponding WHERE clause of the statement. Some databases use a different interpretation for UPDATEs and only return the number of rows that were changed by the UPDATE, even though the WHERE clause of the statement may have found more matching rows. Database module authors should try to implement the more common interpretation of returning the total number of rows found by the WHERE clause, or clearly document a different interpretation of the .rowcount attribute.

[11] The rowcount attribute may be coded in a way that updates its value dynamically. This can be useful for databases that return usable rowcount values only after the first call to a .fetch*() method.

[12] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used.

The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function.

For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.

[13] The module will use the __getitem__ method of the parameters object to map either positions (integers) or names (strings) to parameter values. This allows for both sequences and mappings to be used as input.

The term bound refers to the process of binding an input value to a database execution buffer. In practical terms, this means that the input value is directly used as a value in the operation. The client should not be required to "escape" the value so that it can be used; the value should be equal to the actual database value.

[14] Note that the interface may implement row fetching using arrays and other optimizations. It is not guaranteed that a call to this method will only move the associated cursor forward by one row.

[15] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used.

The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function.

For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.

[16] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[17] Implementation Note: Python C extensions will have to implement the tp_iter slot on the cursor object instead of the .__iter__() method.

[18] Many database modules implementing the autocommit attribute will automatically commit any pending transaction and then enter autocommit mode. It is generally recommended to explicitly .commit() or .rollback() transactions prior to changing the autocommit setting, since this is portable across database modules.

[19] In a future revision of the DB-API, we are going to introduce a new method .setautocommit(value), which will allow setting the autocommit mode, and make .autocommit a read-only attribute. Additionally, we are considering adding a new standard keyword parameter autocommit to the Connection constructor.
Module authors are encouraged to add these changes in preparation for this change.


PEP: 628
Title: Add math.tau
Version: $Revision$
Last-Modified: $Date$
Author: Alyssa Coghlan
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Jun-2011
Python-Version: 3.6
Post-History: 28-Jun-2011

Abstract

In honour of Tau Day 2011, this PEP proposes the addition of the circle constant math.tau to the Python standard library.

The concept of tau (τ) is based on the observation that the ratio of a circle's circumference to its radius is far more fundamental and interesting than the ratio between its circumference and diameter. It is simply a matter of assigning a name to the value 2 * pi (2π).

PEP Acceptance

This PEP is now accepted and math.tau will be a part of Python 3.6. Happy birthday Alyssa!

The idea in this PEP has been implemented in the auspiciously named issue 12345.

The Rationale for Tau

pi is defined as the ratio of a circle's circumference to its diameter. However, a circle is defined by its centre point and its radius. This is shown clearly when we note that the parameter of integration to go from a circle's circumference to its area is the radius, not the diameter. If we use the diameter instead we have to divide by four to get rid of the extraneous multiplier.

When working with radians, it is trivial to convert any given fraction of a circle to a value in radians in terms of tau. A quarter circle is tau/4, a half circle is tau/2, seven 25ths is 7*tau/25, etc. In contrast with the equivalent expressions in terms of pi (pi/2, pi, 14*pi/25), the unnecessary and needlessly confusing multiplication by two is gone.
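The fraction-of-a-circle arithmetic above is easy to verify once the constant exists (this snippet assumes Python 3.6 or later, where math.tau is available):

    import math

    print(math.tau)                  # 6.283185307179586, i.e. 2 * math.pi
    quarter_circle = math.tau / 4    # same as math.pi / 2
    seven_25ths = 7 * math.tau / 25  # vs. the equivalent 14 * math.pi / 25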
Other Resources

I've barely skimmed the surface of the many examples put forward to point out just how much easier and more sensible many aspects of mathematics become when conceived in terms of tau rather than pi. If you don't find my specific examples sufficiently persuasive, here are some more resources that may be of interest:

- Michael Hartl is the primary instigator of Tau Day in his Tau Manifesto
- Bob Palais, the author of the original mathematics journal article highlighting the problems with pi, has a page of resources on the topic
- For those that prefer videos to written text, Pi is wrong! and Pi is (still) wrong are available on YouTube

Copyright

This document has been placed in the public domain.


PEP: 660
Title: Editable installs for pyproject.toml based builds (wheel based)
Author: Daniel Holth, Stéphane Bidoul
Sponsor: Paul Moore
Discussions-To: https://discuss.python.org/t/draft-pep-editable-installs-for-pep-517-style-build-backends/8510
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 30-Mar-2021
Post-History:
Resolution: https://discuss.python.org/t/pronouncement-on-peps-660-and-662-editable-installs/9450

Abstract

This document describes a PEP 517 style method for the installation of packages in editable mode.

Motivation

Python programmers want to be able to develop packages without having to install (i.e. copy) them into site-packages, for example, by working in a checkout of the source repository.

While this can be done by adding the relevant source directories to PYTHONPATH, setuptools provides the setup.py develop mechanism that makes the process easier, and also installs dependencies and entry points such as console scripts. pip exposes this mechanism via its pip install --editable option.

The installation of projects in such a way that the python code being imported remains in the source directory is known as the editable installation mode.

Now that PEP 517 provides a mechanism to create alternatives to setuptools, and decouple installation front ends from build backends, we need a new mechanism to install packages in editable mode.

Rationale

PEP 517 deferred "Editable installs", meaning non-setup.py distributions lacked that feature. The only way to retain editable installs for these distributions was to provide a compatible setup.py develop implementation. By defining an editable hook, other build frontends gain parity with setup.py.

Terminology and goals

The editable installation mode implies that the source code of the project being installed is available in a local directory.

Once the project is installed in editable mode, users expect that changes to the project python code in the local source tree become effective without the need of a new installation step.

Some kinds of changes, such as the addition or modification of entry points, or the addition of new dependencies, require a new installation step to become effective. These changes are typically made in build backend configuration files (such as pyproject.toml), so it is consistent with the general user expectation that python source code is imported from the source tree.

The modification of non-python source code such as C extension modules obviously requires a compilation and/or installation step to become effective. The exact steps to perform will remain specific to the build backend used.

When a project is installed in editable mode, users expect the installation to behave identically to a regular installation.
In particular the code must be importable by other code, and metadata must be available to standard mechanisms such as importlib.metadata.

Depending on the way build backends implement this specification, some minor differences may be visible, such as the presence of additional files that are in the source tree and would not be part of a regular install. Build backends are encouraged to document such potential differences.

The Mechanism

This PEP adds three optional hooks to the PEP 517 backend interface. These hooks are used to build a wheel that, when installed, allows that distribution to be imported from its source folder.

build_editable

    def build_editable(wheel_directory, config_settings=None, metadata_directory=None):
        ...

Must build a .whl file, and place it in the specified wheel_directory. It must return the basename (not the full path) of the .whl file it creates, as a unicode string.

May do an in-place build of the distribution as a side effect so that any extension modules or other built artifacts are ready to be used.

The .whl file must comply with the Wheel binary file format specification (PEP 427). In particular it must contain a compliant .dist-info directory. Metadata must be identical to what would have been produced by build_wheel or prepare_metadata_for_build_wheel, except for Requires-Dist, which may differ slightly as explained below.

Build backends must produce wheels that have the same dependencies (Requires-Dist metadata) as wheels produced by the build_wheel hook, with the exception that they can add dependencies necessary for their editable mechanism to function at runtime (such as editables).

The filename for the "editable" wheel needs to be PEP 427 compliant too. It does not need to use the same tags as build_wheel but it must be tagged as compatible with the system.

If the build frontend has previously called prepare_metadata_for_build_editable and depends on the wheel resulting from this call to have metadata matching this earlier call, then it should provide the path to the created .dist-info directory as the metadata_directory argument. If this argument is provided, then build_editable MUST produce a wheel with identical metadata. The directory passed in by the build frontend MUST be identical to the directory created by prepare_metadata_for_build_editable, including any unrecognized files it created.

An "editable" wheel uses the wheel format not for distribution but as ephemeral communication between the build system and the front end. This avoids having the build backend install anything directly. This wheel must not be exposed to end users, nor cached, nor distributed.

get_requires_for_build_editable

    def get_requires_for_build_editable(config_settings=None):
        ...

This hook MUST return an additional list of strings containing PEP 508 dependency specifications, above and beyond those specified in the pyproject.toml file, to be installed when calling the build_editable hooks.

If not defined, the default implementation is equivalent to return [].

prepare_metadata_for_build_editable

    def prepare_metadata_for_build_editable(metadata_directory, config_settings=None):
        ...

Must create a .dist-info directory containing wheel metadata inside the specified metadata_directory (i.e., creates a directory like {metadata_directory}/{package}-{version}.dist-info/).
This directory MUST be a valid .dist-info directory as defined in the wheel specification, except that it need not contain RECORD or signatures. The hook MAY also create other files inside this directory, and a build frontend MUST preserve, but otherwise ignore, such files; the intention here is that in cases where the metadata depends on build-time decisions, the build backend may need to record these decisions in some convenient format for re-use by the actual wheel-building step.

This must return the basename (not the full path) of the .dist-info directory it creates, as a unicode string.

If a build frontend needs this information and the method is not defined, it should call build_editable and look at the resulting metadata directly.

What to put in the wheel

Build backends must populate the generated wheel with files that when installed will result in an editable install. Build backends may use different techniques to achieve the goals of an editable install. This section provides examples and is not normative.

- Build backends may choose to place a .pth file at the root of the .whl file, containing the root directory of the source tree (a minimal sketch of this approach follows this list). This approach is simple but not very precise, although it may be considered good enough (especially when using the src layout) and is similar to what setup.py develop currently does.
- The editables library shows how to build proxy modules that provide a high quality editable installation. It accepts a list of modules to include, and hide. When imported, these proxy modules replace themselves with the code from the source tree. Path-based methods make all scripts under a path importable, often including the project's own setup.py and other scripts that would not be part of a normal installation. The proxy strategy can achieve a higher level of fidelity than path-based methods.
- Symbolic links are another useful mechanism to realize editable installs. Since, at the time of this writing, the wheel specification does not support symbolic links, they are not directly usable to set up symbolic links in the target environment. It is however possible for the backend to create a symlink structure in some build directory of the source tree, and add that directory to the python path via a .pth file in the "editable" wheel. If some files linked in this manner depend on python implementation or version, ABI or platform, care must be taken to generate the link structure in different directories depending on compatibility tags, so the same project tree can be installed in editable mode in multiple environments.
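To make the first, .pth-based technique concrete, here is a minimal sketch that builds such an "editable" wheel with only the standard library. Everything in it is an assumption for illustration: the .pth file name, the minimal METADATA and WHEEL contents and the py3-none-any tag are placeholders, and a real backend must also meet the metadata requirements stated earlier:

    import base64
    import hashlib
    import zipfile

    def build_pth_wheel(wheel_path, name, version, src_dir):
        # Build a wheel whose only payload is a .pth file pointing at src_dir.
        dist_info = '{}-{}.dist-info'.format(name, version)
        record_rows = []

        def add(zf, arcname, text):
            data = text.encode('utf-8')
            zf.writestr(arcname, data)
            digest = base64.urlsafe_b64encode(
                hashlib.sha256(data).digest()).rstrip(b'=').decode('ascii')
            record_rows.append('{},sha256={},{}'.format(arcname, digest, len(data)))

        with zipfile.ZipFile(wheel_path, 'w') as zf:
            # Lands in site-packages; adds src_dir to sys.path at startup.
            add(zf, '_editable_' + name + '.pth', src_dir + '\n')
            add(zf, dist_info + '/METADATA',
                'Metadata-Version: 2.1\nName: {}\nVersion: {}\n'.format(name, version))
            add(zf, dist_info + '/WHEEL',
                'Wheel-Version: 1.0\nGenerator: sketch 0.1\n'
                'Root-Is-Purelib: true\nTag: py3-none-any\n')
            record_rows.append(dist_info + '/RECORD,,')
            zf.writestr(dist_info + '/RECORD', '\n'.join(record_rows) + '\n')

A build_editable implementation following this approach would write the file into wheel_directory and return its basename.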
Frontend requirements

Frontends must install "editable" wheels in the same way as regular wheels. This also means uninstallation of editables does not require any special treatment.

Frontends must create a direct_url.json file in the .dist-info directory of the installed distribution, in compliance with PEP 610. The url value must be a file:// url pointing to the project directory (i.e. the directory containing pyproject.toml), and the dir_info value must be {'editable': true}.

Frontends must execute get_requires_for_build_editable hooks in an environment which contains the bootstrap requirements specified in the pyproject.toml file.

Frontends must execute the prepare_metadata_for_build_editable and build_editable hooks in an environment which contains the bootstrap requirements from pyproject.toml and those specified by the get_requires_for_build_editable hook.

Frontends must not expose the wheel obtained from build_editable to end users. The wheel must be discarded after installation and must not be cached nor distributed.

Limitations

With regard to the wheel .data directory, this PEP focuses on making the purelib and platlib categories (installed into site-packages) "editable". It does not make special provision for the other categories such as headers, data and scripts. Package authors are encouraged to use console_scripts, make their scripts tiny wrappers around library functionality, or manage these from the source checkout during development.

Prototypes

At the time of writing this PEP, several prototype implementations are available in various frontends and backends. We provide links below to illustrate possible approaches.

Frontends:

- pip (pull request)

Build backends:

- enscons (pull request 1, pull request 2)
- flit (pull request)
- hatchling (sdist)
- pdm (pull request)
- setuptools (setuptools_pep660 repository)

Rejected ideas

editable local version identifier

The idea of having build backends append or modify the local version identifier to include the editable string has been rejected because it would not satisfy == version specifiers that include the local version identifier. In other words pkg==1.0+local is not satisfied by version 1.0+local.editable.

Virtual wheel

Another approach was proposed in PEP 662, where the build backend returns a mapping from source files and directories to the installed layout. It is then up to the installer frontend to realize the editable installation by whatever means it deems adequate for its users.

In terms of capabilities, both proposals provide the core "editable" feature.

The key difference is that PEP 662 leaves it to the frontend to decide how the editable installation will be realized, while with this PEP, the choice must be made by the backend. Both approaches can in principle provide several editable installation methods for a given project, and let the developer choose one at install time.

At the time of writing this PEP, it is clear that the community has a wide range of theoretical and practical expectations about editable installs. The reality is that the only one there is wide experience with is path insertion via .pth (i.e. what setup.py develop does).

We believe that PEP 660 better addresses these "unknown unknowns" today in the most reliable way, by letting project authors select the backend or implement the method that provides the editable mechanism that best suits their requirements, and test that it works correctly. Since the frontend has no latitude in how to install the "editable" wheel, in case of issue there is only one place to investigate: the build backend.

With PEP 662, issues need to be investigated in the frontend, the backend and possibly the specification.
There is also a high probability that different frontends, implementing the specification in different ways, will produce installations that behave differently than project authors intended, creating confusion, or worse, projects that only work with specific frontends or IDEs.

Unpacked wheel

A prototype was made that created an unpacked wheel in a temporary directory, to be copied to the target environment by the frontend. This approach was not pursued because a wheel archive is easy to create for the backend, and using a wheel as communication mechanism is a better fit with the PEP 517 philosophy, and therefore keeps things simpler for the frontend.

References

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.


PEP: 3113
Title: Removal of Tuple Parameter Unpacking
Version: $Revision$
Last-Modified: $Date$
Author: Brett Cannon
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 02-Mar-2007
Python-Version: 3.0
Post-History:

Abstract

Tuple parameter unpacking is the use of a tuple as a parameter in a function signature so as to have a sequence argument automatically unpacked. An example is:

    def fxn(a, (b, c), d):
        pass

The use of (b, c) in the signature requires that the second argument to the function be a sequence of length two (e.g., [42, -13]). When such a sequence is passed it is unpacked and has its values assigned to the parameters, just as if the statement b, c = [42, -13] had been executed in the parameter.

Unfortunately this feature of Python's rich function signature abilities, while handy in some situations, causes more issues than it is worth. Thus this PEP proposes the removal of tuple parameters from the language in Python 3.0.

Why They Should Go

Introspection Issues

Python has very powerful introspection capabilities. These extend to function signatures. There are no hidden details as to what a function's call signature is. In general it is fairly easy to figure out various details about a function's signature by viewing the function object and various attributes on it (including the function's func_code attribute).

But there is great difficulty when it comes to tuple parameters. The existence of a tuple parameter is denoted by its name being made of a . and a number in the co_varnames attribute of the function's code object. This allows the tuple argument to be bound to a name that only the bytecode is aware of and cannot be typed in Python source. But this does not specify the format of the tuple: its length, whether there are nested tuples, etc.

In order to get all of the details about the tuple from the function one must analyse the bytecode of the function. This is because the first bytecode in the function literally translates into the tuple argument being unpacked. Assuming the tuple parameter is named .1 and is expected to unpack to variables spam and monty (meaning it is the tuple (spam, monty)), the first bytecode in the function will be for the statement spam, monty = .1. This means that to know all of the details of the tuple parameter one must look at the initial bytecode of the function to detect tuple unpacking for parameters formatted as \.\d+ and deduce any and all information about the expected argument. Bytecode analysis is how the inspect.getargspec function is able to provide information on tuple parameters. This is not easy to do and is burdensome on introspection tools, as they must know how Python bytecode works (an otherwise unneeded burden, as all other types of parameters do not require knowledge of Python bytecode).

The difficulty of analysing bytecode notwithstanding, there is another issue with the dependency on using Python bytecode. IronPython[1] does not use Python's bytecode. Because it is based on the .NET framework it instead stores MSIL[2] in the func_code.co_code attribute of the function. This fact prevents the inspect.getargspec function from working when run under IronPython. It is unknown whether other Python implementations are affected, but it is reasonable to assume they are if the implementation is not just a re-implementation of the Python virtual machine.

No Loss of Abilities If Removed

As mentioned in Introspection Issues, to handle tuple parameters the function's bytecode starts with the bytecode required to unpack the argument into the proper parameter names. This means that there is no special support required to implement tuple parameters, and thus there is no loss of abilities if they were to be removed, only a possible convenience (which is addressed in Why They Should (Supposedly) Stay).

The example function at the beginning of this PEP could easily be rewritten as:

    def fxn(a, b_c, d):
        b, c = b_c
        pass

and in no way lose functionality.

Exception To The Rule

When looking at the various types of parameters that a Python function can have, one will notice that tuple parameters tend to be an exception rather than the rule.

Consider PEP 3102 (keyword-only arguments) and PEP 3107 (function annotations). Both PEPs have been accepted and introduce new functionality within a function's signature. And yet for both PEPs the new feature cannot be applied to tuple parameters as a whole. PEP 3102 has no support for tuple parameters at all (which makes sense as there is no way to reference a tuple parameter by name). PEP 3107 allows annotations for each item within the tuple (e.g., (x:int, y:int)), but not the whole tuple (e.g., (x, y):int).

The existence of tuple parameters also places sequence objects separately from mapping objects in a function signature. There is no way to pass in a mapping object (e.g., a dict) as a parameter and have it unpack in the same fashion as a sequence does into a tuple parameter.

Uninformative Error Messages

Consider the following function:

    def fxn((a, b), (c, d)):
        pass

If called as fxn(1, (2, 3)) one is given the error message TypeError: unpack non-sequence. This error message in no way tells you which tuple was not unpacked properly. There is also no indication that this was a result that occurred because of the arguments.
Other error messages regarding arguments to functions explicitly state their relation to the signature: TypeError: fxn() takes exactly 2 arguments (0 given), etc.

Little Usage

While an informal poll of the handful of Python programmers I know personally and from the PyCon 2007 sprint indicates a huge majority of people do not know of this feature and the rest just do not use it, some hard numbers are needed to back up the claim that the feature is not heavily used.

Iterating over every line in the Lib/ directory of Python's code repository, using the regular expression ^\s*def\s*\w+\s*\( to detect function and method definitions, there were 22,252 matches in the trunk.

Tacking on .*,\s*\( to find def statements that contained a tuple parameter, only 41 matches were found. This means that for def statements, only 0.18% of them seem to use a tuple parameter.
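For reproducibility, the scan can be sketched as follows (a modern re-creation, not the original script; it assumes it is run from a CPython checkout so that Lib/ exists, and the totals will vary with the revision):

    import pathlib
    import re

    # The two regular expressions quoted above.
    def_re = re.compile(r'^\s*def\s*\w+\s*\(')
    tuple_re = re.compile(r'^\s*def\s*\w+\s*\(.*,\s*\(')

    total = with_tuple = 0
    for path in pathlib.Path('Lib').rglob('*.py'):
        for line in path.read_text(errors='ignore').splitlines():
            if def_re.match(line):
                total += 1
                if tuple_re.match(line):
                    with_tuple += 1

    print(with_tuple, 'of', total, 'defs use a tuple parameter')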
Why They Should (Supposedly) Stay

Practical Use

In certain instances tuple parameters can be useful. A common example is code that expects a two-item tuple that represents a Cartesian point. While it is true that it is nice to have the unpacking of the x and y coordinates done for you, the argument is that this small amount of practical usefulness is heavily outweighed by other issues pertaining to tuple parameters. And as shown in No Loss of Abilities If Removed, their use is purely practical and in no way provides a unique ability that cannot be handled in other ways very easily.

Self-Documentation For Parameters

It has been argued that tuple parameters provide a way of self-documentation for parameters that are expected to be of a certain sequence format. Using our Cartesian point example from Practical Use, seeing (x, y) as a parameter in a function makes it obvious that a tuple of length two is expected as an argument for that parameter.

But Python provides several other ways to document what parameters are for. Documentation strings are meant to provide enough information needed to explain what arguments are expected. Tuple parameters might tell you the expected length of a sequence argument, but they do not tell you what that data will be used for. One must also read the docstring to know what other arguments are expected if not all parameters are tuple parameters.

Function annotations (which do not work with tuple parameters) can also supply documentation. Because annotations can be of any form, what was once a tuple parameter can be a single argument parameter with an annotation of tuple, tuple(2), Cartesian point, (x, y), etc. Annotations provide great flexibility for documenting what an argument is expected to be for a parameter, including being a sequence of a certain length.

Transition Plan

To transition Python 2.x code to 3.x where tuple parameters are removed, two steps are suggested. First, the proper warning is to be emitted when Python's compiler comes across a tuple parameter in Python 2.6. This will be treated like any other syntactic change that is to occur in Python 3.0 compared to Python 2.6.

Second, the 2to3 refactoring tool[3] will gain a fixer[4] for translating tuple parameters to being a single parameter that is unpacked as the first statement in the function. The name of the new parameter will be changed. The new parameter will then be unpacked into the names originally used in the tuple parameter. This means that the following function:

    def fxn((a, (b, c))):
        pass

will be translated into:

    def fxn(a_b_c):
        (a, (b, c)) = a_b_c
        pass

As tuple parameters are used by lambdas because of the single expression limitation, they must also be supported. This is done by having the expected sequence argument bound to a single parameter and then indexing on that parameter:

    lambda (x, y): x + y

will be translated into:

    lambda x_y: x_y[0] + x_y[1]

References

[1] IronPython (http://www.codeplex.com/Wiki/View.aspx?ProjectName=IronPython)

[2] Microsoft Intermediate Language (http://msdn.microsoft.com/library/en-us/cpguide/html/cpconmicrosoftintermediatelanguagemsil.asp?frame=true)

[3] 2to3 refactoring tool (http://svn.python.org/view/sandbox/trunk/2to3/)

[4] 2to3 fixer (http://svn.python.org/view/sandbox/trunk/2to3/fixes/fix_tuple_params.py)

Copyright

This document has been placed in the public domain.


PEP: 332
Title: Byte vectors and String/Unicode Unification
Version: $Revision$
Last-Modified: $Date$
Author: Skip Montanaro
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 11-Aug-2004
Python-Version: 2.5
Post-History:

Abstract

This PEP outlines the introduction of a raw bytes sequence object and the unification of the current str and unicode objects.

Rejection Notice

This PEP is rejected in this form. The author has expressed lack of time to continue to shepherd it, and discussion on python-dev has moved to a slightly different proposal which will (eventually) be written up as a new PEP. See the thread starting at https://mail.python.org/pipermail/python-dev/2006-February/060930.html.

Rationale

Python's current string objects are overloaded. They serve both to hold ASCII and non-ASCII character data and to also hold sequences of raw bytes which have no reasonable interpretation as displayable character sequences. This overlap hasn't been a big problem in the past, but as Python moves closer to requiring source code to be properly encoded, the use of strings to represent raw byte sequences will be more problematic. In addition, as Python's Unicode support has improved, it's easier to consider strings as ASCII-encoded Unicode objects.

Proposed Implementation

The number in parentheses indicates the Python version in which the feature will be introduced.

- Add a bytes builtin which is just a synonym for str. (2.5)
- Add a b"..." string literal which is equivalent to raw string literals, with the exception that values which conflict with the source encoding of the containing file will not generate warnings. (2.5)
- Warn about the use of variables named "bytes". (2.5 or 2.6)
- Introduce a bytes builtin which refers to a sequence distinct from the str type. (2.6)
- Make str a synonym for unicode. (3.0)

Bytes Object API

TBD.

Issues

- Can this be accomplished before Python 3.0?
- Should bytes objects be mutable or immutable? (Guido seems to like them to be mutable.)

Copyright

This document has been placed in the public domain.


PEP: 500
Title: A protocol for delegating datetime methods to their tzinfo implementations
Version: $Revision$
Last-Modified: $Date$
Author: Alexander Belopolsky, Tim Peters
Discussions-To: datetime-sig@python.org
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Requires: 495
Created: 08-Aug-2015
Resolution: https://mail.python.org/pipermail/datetime-sig/2015-August/000354.html

Abstract

This PEP specifies a new protocol (PDDM - "A Protocol for Delegating Datetime Methods") that can be used by concrete implementations of the datetime.tzinfo interface to override aware datetime arithmetics, formatting and parsing. We describe changes to the datetime.datetime class to support the new protocol and propose a new abstract class datetime.tzstrict that implements parts of this protocol necessary to make aware datetime instances follow "strict" arithmetic rules.

Rationale

As of Python 3.5, aware datetime instances that share a tzinfo object follow the rules of arithmetics that are induced by a simple bijection between (year, month, day, hour, minute, second, microsecond) 7-tuples and large integers. In this arithmetics, the difference between YEAR-11-02T12:00 and YEAR-11-01T12:00 is always 24 hours, even though in the US/Eastern timezone, for example, there are 25 hours between 2014-11-01T12:00 and 2014-11-02T12:00 because the local clocks were rolled back one hour at 2014-11-02T02:00, introducing an extra hour in the night between 2014-11-01 and 2014-11-02.

Many business applications require the use of Python's simplified view of local dates. No self-respecting car rental company will charge its customers more for a week that straddles the end of DST than for any other week or require that they return the car an hour early. Therefore, changing the current rules for aware datetime arithmetics will not only create a backward compatibility nightmare, it will eliminate support for legitimate and common use cases.

Since it is impossible to choose universal rules for local time arithmetics, we propose to delegate implementation of those rules to the classes that implement the datetime.tzinfo interface.
With such delegation in place, users will be able to choose between different arithmetics by simply picking instances of different classes for the value of tzinfo.

Protocol

Subtraction of datetime

A tzinfo subclass supporting the PDDM may define a method called __datetime_diff__ that should take two datetime.datetime instances and return a datetime.timedelta instance representing the time elapsed from the time represented by the first datetime instance to the second.

Addition

A tzinfo subclass supporting the PDDM may define a method called __datetime_add__ that should take two arguments--a datetime and a timedelta instance--and return a datetime instance.

Subtraction of timedelta

A tzinfo subclass supporting the PDDM may define a method called __datetime_sub__ that should take two arguments--a datetime and a timedelta instance--and return a datetime instance.

Formatting

A tzinfo subclass supporting the PDDM may define methods called __datetime_isoformat__ and __datetime_strftime__.

The __datetime_isoformat__ method should take a datetime instance and an optional separator and produce a string representation of the given datetime instance.

The __datetime_strftime__ method should take a datetime instance and a format string and produce a string representation of the given datetime instance formatted according to the given format.

Parsing

A tzinfo subclass supporting the PDDM may define a class method called __datetime_strptime__ and register the "canonical" names of the timezones that it implements with a registry. TODO Describe a registry.

Changes to datetime methods

Subtraction

    class datetime:
        def __sub__(self, other):
            if isinstance(other, datetime):
                # Look the protocol method up on each operand's own tzinfo.
                try:
                    self_diff = self.tzinfo.__datetime_diff__
                except AttributeError:
                    self_diff = None
                try:
                    other_diff = other.tzinfo.__datetime_diff__
                except AttributeError:
                    other_diff = None
                if self_diff is not None:
                    if self_diff is not other_diff and self_diff.__func__ is not other_diff.__func__:
                        raise ValueError("Cannot find difference of two datetimes with "
                                         "different tzinfo.__datetime_diff__ implementations.")
                    return self_diff(self, other)
            elif isinstance(other, timedelta):
                try:
                    sub = self.tzinfo.__datetime_sub__
                except AttributeError:
                    pass
                else:
                    return sub(self, other)
                return self + -other
            else:
                return NotImplemented
            # current implementation

Addition

Addition of a timedelta to a datetime instance will be delegated to the self.tzinfo.__datetime_add__ method whenever it is defined.
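The PEP gives no corresponding code for addition; in the same style as the Subtraction snippet above, the intent could be sketched as follows (an illustration of the prose, not text from the specification):

    class datetime:
        def __add__(self, other):
            if isinstance(other, timedelta):
                try:
                    add = self.tzinfo.__datetime_add__
                except AttributeError:
                    pass
                else:
                    return add(self, other)
            # otherwise fall through to the current implementation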
This subclass\nwill not implement the utcoffset(), tzname() or dst() methods, but will\nimplement some of the methods of the PDDM.\n\nThe PDDM methods implemented by tzstrict will be equivalent to the\nfollowing:\n\n class tzstrict(tzinfo):\n def __datetime_diff__(self, dt1, dt2):\n utc_dt1 = dt1.astimezone(timezone.utc)\n utc_dt2 = dt2.astimezone(timezone.utc)\n return utc_dt2 - utc_dt1\n\n def __datetime_add__(self, dt, delta):\n utc_dt = dt.astimezone(timezone.utc)\n return (utc_dt + delta).astimezone(self)\n\n def __datetime_sub__(self, dt, delta):\n utc_dt = dt.astimezone(timezone.utc)\n return (utc_dt - delta).astimezone(self)\n\nParsing and formatting\n\nDatetime methods strftime and isoformat will delegate to the namesake\nmethods of their tzinfo members whenever those methods are defined.\n\nWhen the datetime.strptime method is given a format string that contains\na %Z instruction, it will lookup the tzinfo implementation in the\nregistry by the given timezone name and call its __datetime_strptime__\nmethod.\n\nApplications\n\nThis PEP will enable third party implementation of many different\ntimekeeping schemes including:\n\n- Julian / Microsoft Excel calendar.\n- \"Right\" timezones with the leap second support.\n- French revolutionary calendar (with a lot of work).\n\nCopyright\n\nThis document has been placed in the public domain."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.038003"},"created":{"kind":"timestamp","value":"2015-08-08T00:00:00","string":"2015-08-08T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0500/\",\n \"authors\": [\n \"Alexander Belopolsky\",\n \"Tim Peters\"\n ],\n \"pep_number\": \"0500\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":592,"cells":{"id":{"kind":"string","value":"0449"},"text":{"kind":"string","value":"PEP: 449 Title: Removal of the PyPI Mirror Auto Discovery and Naming\nScheme Version: $Revision$ Last-Modified: $Date$ Author: Donald Stufft\n BDFL-Delegate: Richard Jones \nDiscussions-To: distutils-sig@python.org Status: Final Type: Process\nTopic: Packaging Content-Type: text/x-rst Created: 04-Aug-2013\nPost-History: 04-Aug-2013 Replaces: 381 Resolution:\nhttps://mail.python.org/pipermail/distutils-sig/2013-August/022518.html\n\nAbstract\n\nThis PEP provides a path to deprecate and ultimately remove the auto\ndiscovery of PyPI mirrors as well as the hard coded naming scheme which\nrequires delegating a domain name under pypi.python.org to a third\nparty.\n\nRationale\n\nThe PyPI mirroring infrastructure (defined in PEP 381) provides a means\nto mirror the content of PyPI used by the automatic installers. 
It also\nprovides a method for auto discovery of mirrors and a consistent naming\nscheme.\n\nThere are a number of problems with the auto discovery protocol and the\nnaming scheme:\n\n- They give control over a *.python.org domain name to a third party,\n  allowing that third party to set or read cookies on the\n  pypi.python.org and python.org domain names.\n- The use of a sub domain of pypi.python.org means that the mirror\n  operators will never be able to get an SSL certificate of their own,\n  and giving them one for a python.org domain name is unlikely to\n  happen.\n- The auto discovery uses an unauthenticated protocol (DNS).\n- The lack of a TLS certificate on these domains means that clients\n  cannot be sure that they have not been a victim of DNS poisoning or\n  a MITM attack.\n- The auto discovery protocol was designed to enable a client to\n  automatically select a mirror for use. This is no longer a\n  requirement because the CDN that PyPI is now using is a globally\n  distributed network of servers which will automatically select one\n  close to the client without any effort on the client's part.\n- The auto discovery protocol and the use of the consistent naming\n  scheme have only ever been implemented by one installer (pip), and\n  its implementation, besides being insecure, has serious issues with\n  performance and is slated for removal with its next release (1.5).\n- While there are provisions in PEP 381 that would solve some of these\n  issues for a dedicated client, they would not solve the issues that\n  affect a user's browser. Additionally, these provisions have not\n  been implemented by any installer to date.\n\nDue to the number of issues, some of them very serious, and the CDN\nwhich provides most of the benefit of the auto discovery and consistent\nnaming scheme, this PEP proposes to first deprecate and then remove the\n[a..z].pypi.python.org names for mirrors and the last.pypi.python.org\nname for the auto discovery protocol. The ability to mirror and the\nmethod of mirroring will not be affected and will continue to exist as\nwritten in PEP 381. Operators of existing mirrors are encouraged to\nacquire their own domains and certificates to use for their mirrors if\nthey wish to continue hosting them.\n\nPlan for Deprecation & Removal\n\nImmediately upon acceptance of this PEP, documentation on PyPI will be\nupdated to reflect the deprecated nature of the official public mirrors\nand will direct users to external resources like\nhttp://www.pypi-mirrors.org/ to discover unofficial public mirrors if\nthey wish to use one.\n\nMirror operators, if they wish to continue operating their mirror,\nshould acquire a domain name to represent their mirror and, if they are\nable, a TLS certificate. Once they have acquired a domain, they should\nredirect their assigned N.pypi.python.org domain name to their new\ndomain. On Feb 15th, 2014 the DNS entries for [a..z].pypi.python.org and\nlast.pypi.python.org will be removed. At any time prior to Feb 15th,\n2014 a mirror operator may request that their domain name be reclaimed\nby PyPI and pointed back at the master.\n\nWhy Feb 15th, 2014\n\nThe most critical decision of this PEP is the final cut-off date. If the\ndate is too soon then it needlessly punishes people by forcing them to\ndrop everything to update their deployment scripts.
If the date is too\nfar away then the extended period of time does not help with the\nmigration effort and merely puts off the migration until a later date.\n\nThe date of Feb 15th, 2014 has been chosen because it is roughly 6\nmonths from the date of the PEP. This should ensure a lengthy period of\ntime to enable people to update their deployment procedures to point to\nthe new domains names without merely padding the cut off date.\n\nWhy the DNS entries must be removed\n\nWhile it would be possible to simply reclaim the domain names used in\nmirror and direct them back at PyPI in order to prevent users from\nneeding to update configurations to point away from those domains this\nhas a number of issues.\n\n- Anyone who currently has these names hard coded in their\n configuration has them hard coded as HTTP. This means that by\n allowing these names to continue resolving we make it simple for a\n MITM operator to attack users by rewriting the redirect to HTTPS\n prior to giving it to the client.\n- The overhead of maintaining several domains pointing at PyPI has\n proved troublesome for the small number of N.pypi.python.org domains\n that have already been reclaimed. They oftentimes get mis-configured\n when things change on the service which often leaves them broken for\n months at a time until somebody notices. By leaving them in we leave\n users of these domains open to random breakages which are less\n likely to get caught or noticed.\n- People using these domains have explicitly chosen to use them for\n one reason or another. One such reason may be because they do not\n wish to deploy from a host located in a particular country. If these\n domains continue to resolve but do not point at their existing\n locations we have silently removed this choice from the existing\n users of those domains.\n\nThat being said, removing the entries will require users who have\nmodified their configuration to either point back at the master (PyPI)\nor select a new mirror name to point at. This is regarded as a\nregrettable requirement to protect PyPI itself and the users of the\nmirrors from the attacks outlined above or, at the very least, require\nthem to make an informed decision about the insecurity.\n\nPublic or Private Mirrors\n\nThe mirroring protocol will continue to exist as defined in PEP 381 and\npeople are encouraged to host public and private mirrors if they so\ndesire. The recommended mirroring client is Bandersnatch.\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.046910"},"created":{"kind":"timestamp","value":"2013-08-04T00:00:00","string":"2013-08-04T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0449/\",\n \"authors\": [\n \"Donald Stufft\"\n ],\n \"pep_number\": \"0449\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":593,"cells":{"id":{"kind":"string","value":"0597"},"text":{"kind":"string","value":"PEP: 597 Title: Add optional EncodingWarning Last-Modified: 07-Aug-2021\nAuthor: Inada Naoki Status: Final Type:\nStandards Track Content-Type: text/x-rst Created: 05-Jun-2019\nPython-Version: 3.10\n\nAbstract\n\nAdd a new warning category EncodingWarning. 
It is emitted when the\nencoding argument to open() is omitted and the default locale-specific\nencoding is used.\n\nThe warning is disabled by default. A new -X warn_default_encoding\ncommand-line option and a new PYTHONWARNDEFAULTENCODING environment\nvariable can be used to enable it.\n\nA \"locale\" argument value for encoding is added too. It explicitly\nspecifies that the locale encoding should be used, silencing the\nwarning.\n\nMotivation\n\nUsing the default encoding is a common mistake\n\nDevelopers using macOS or Linux may forget that the default encoding is\nnot always UTF-8.\n\nFor example, using long_description = open(\"README.md\").read() in\nsetup.py is a common mistake. Many Windows users cannot install such\npackages if there is at least one non-ASCII character (e.g. emoji,\nauthor names, copyright symbols, and the like) in their UTF-8-encoded\nREADME.md file.\n\nOf the 4000 most downloaded packages from PyPI, 489 use non-ASCII\ncharacters in their README, and 82 fail to install from source on\nnon-UTF-8 locales due to not specifying an encoding for a non-ASCII\nfile.[1]\n\nAnother example is logging.basicConfig(filename=\"log.txt\"). Some users\nmight expect it to use UTF-8 by default, but the locale encoding is\nactually what is used.[2]\n\nEven Python experts may assume that the default encoding is UTF-8. This\ncreates bugs that only happen on Windows; see[3],[4],[5], and[6] for\nexample.\n\nEmitting a warning when the encoding argument is omitted will help find\nsuch mistakes.\n\nExplicit way to use locale-specific encoding\n\nopen(filename) isn't explicit about which encoding is expected:\n\n- If ASCII is assumed, this isn't a bug, but may result in decreased\n performance on Windows, particularly with non-Latin-1 locale\n encodings\n- If UTF-8 is assumed, this may be a bug or a platform-specific script\n- If the locale encoding is assumed, the behavior is as expected (but\n could change if future versions of Python modify the default)\n\nFrom this point of view, open(filename) is not readable code.\n\nencoding=locale.getpreferredencoding(False) can be used to specify the\nlocale encoding explicitly, but it is too long and easy to misuse (e.g.\none can forget to pass False as its argument).\n\nThis PEP provides an explicit way to specify the locale encoding.\n\nPrepare to change the default encoding to UTF-8\n\nSince UTF-8 has become the de-facto standard text encoding, we might\ndefault to it for opening files in the future.\n\nHowever, such a change will affect many applications and libraries. If\nwe start emitting DeprecationWarning everywhere the encoding argument is\nomitted, it will be too noisy and painful.\n\nAlthough this PEP doesn't propose changing the default encoding, it will\nhelp enable that change by:\n\n- Reducing the number of omitted encoding arguments in libraries\n before we start emitting a DeprecationWarning by default.\n- Allowing users to pass encoding=\"locale\" to suppress the current\n warning and any DeprecationWarning added in the future, as well as\n retaining consistent behavior if later Python versions change the\n default, ensuring support for any Python version >=3.10.\n\nSpecification\n\nEncodingWarning\n\nAdd a new EncodingWarning warning class as a subclass of Warning. It is\nemitted when the encoding argument is omitted and the default\nlocale-specific encoding is used.\n\nOptions to enable the warning\n\nThe -X warn_default_encoding option and the PYTHONWARNDEFAULTENCODING\nenvironment variable are added. 
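For illustration, a sketch of what affected code might look like in a\ntest suite (Python 3.10+; the README.md file is hypothetical, and the\ncomments assume the warning has been enabled):\n\n    import sys\n    import warnings\n\n    if sys.flags.warn_default_encoding:\n        # Escalate EncodingWarning to an error so tests fail fast.\n        warnings.simplefilter('error', EncodingWarning)\n\n    open('README.md').read()  # warns (or raises, under the filter above)\n    open('README.md', encoding='utf-8').read()  # explicit, emits nothing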
They are used to enable EncodingWarning.\n\nsys.flags.warn_default_encoding is also added. The flag is true when\nEncodingWarning is enabled.\n\nWhen the flag is set, io.TextIOWrapper(), open() and other modules using\nthem will emit EncodingWarning when the encoding argument is omitted.\n\nSince EncodingWarning is a subclass of Warning, they are shown by\ndefault (if the warn_default_encoding flag is set), unlike\nDeprecationWarning.\n\nencoding=\"locale\"\n\nio.TextIOWrapper will accept \"locale\" as a valid argument to encoding.\nIt has the same meaning as the current encoding=None, except that\nio.TextIOWrapper doesn't emit EncodingWarning when encoding=\"locale\" is\nspecified.\n\nio.text_encoding()\n\nio.text_encoding() is a helper for functions with an encoding=None\nparameter that pass it to io.TextIOWrapper() or open().\n\nA pure Python implementation will look like this:\n\n def text_encoding(encoding, stacklevel=1):\n \"\"\"A helper function to choose the text encoding.\n\n When *encoding* is not None, just return it.\n Otherwise, return the default text encoding (i.e. \"locale\").\n\n This function emits an EncodingWarning if *encoding* is None and\n sys.flags.warn_default_encoding is true.\n\n This function can be used in APIs with an encoding=None parameter\n that pass it to TextIOWrapper or open.\n However, please consider using encoding=\"utf-8\" for new APIs.\n \"\"\"\n if encoding is None:\n if sys.flags.warn_default_encoding:\n import warnings\n warnings.warn(\n \"'encoding' argument not specified.\",\n EncodingWarning, stacklevel + 2)\n encoding = \"locale\"\n return encoding\n\nFor example, pathlib.Path.read_text() can use it like this:\n\n def read_text(self, encoding=None, errors=None):\n encoding = io.text_encoding(encoding)\n with self.open(mode='r', encoding=encoding, errors=errors) as f:\n return f.read()\n\nBy using io.text_encoding(), EncodingWarning is emitted for the caller\nof read_text() instead of read_text() itself.\n\nAffected standard library modules\n\nMany standard library modules will be affected by this change.\n\nMost APIs accepting encoding=None will use io.text_encoding() as written\nin the previous section.\n\nWhere using the locale encoding as the default encoding is reasonable,\nencoding=\"locale\" will be used instead. For example, the subprocess\nmodule will use the locale encoding as the default for pipes.\n\nMany tests use open() without encoding specified to read ASCII text\nfiles. They should be rewritten with encoding=\"ascii\".\n\nRationale\n\nOpt-in warning\n\nAlthough DeprecationWarning is suppressed by default, always emitting\nDeprecationWarning when the encoding argument is omitted would be too\nnoisy.\n\nNoisy warnings may lead developers to dismiss the DeprecationWarning.\n\n\"locale\" is not a codec alias\n\nWe don't add \"locale\" as a codec alias because the locale can be changed\nat runtime.\n\nAdditionally, TextIOWrapper checks os.device_encoding() when\nencoding=None. 
This behavior cannot be implemented in a codec.\n\nBackward Compatibility\n\nThe new warning is not emitted by default, so this PEP is 100%\nbackwards-compatible.\n\nForward Compatibility\n\nPassing \"locale\" as the argument to encoding is not forward-compatible.\nCode using it will not work on Python older than 3.10, and will instead\nraise LookupError: unknown encoding: locale.\n\nUntil developers can drop Python 3.9 support, EncodingWarning can only\nbe used for finding missing encoding=\"utf-8\" arguments.\n\nHow to Teach This\n\nFor new users\n\nSince EncodingWarning is used to write cross-platform code, there is no\nneed to teach it to new users.\n\nWe can just recommend using UTF-8 for text files and using\nencoding=\"utf-8\" when opening them.\n\nFor experienced users\n\nUsing open(filename) to read text files encoded in UTF-8 is a common\nmistake. It may not work on Windows because UTF-8 is not the default\nencoding.\n\nYou can use -X warn_default_encoding or PYTHONWARNDEFAULTENCODING=1 to\nfind this type of mistake.\n\nOmitting the encoding argument is not a bug when opening text files\nencoded in the locale encoding, but encoding=\"locale\" is recommended in\nPython 3.10 and later because it is more explicit.\n\nReference Implementation\n\nhttps://github.com/python/cpython/pull/19481\n\nDiscussions\n\nThe latest discussion thread is:\nhttps://mail.python.org/archives/list/python-dev@python.org/thread/SFYUP2TWD5JZ5KDLVSTZ44GWKVY4YNCV/\n\n- Why not implement this in linters?\n - encoding=\"locale\" and io.text_encoding() must be implemented in\n Python.\n - It is difficult to find all callers of functions wrapping open()\n or TextIOWrapper() (see the io.text_encoding() section).\n- Many developers will not use the option.\n - Some will, and report the warnings to libraries they use, so the\n option is worth it even if many developers don't enable it.\n - For example, I found[7] and[8] by running pip install -U pip,\n and[9] by running tox with the reference implementation. 
This\n demonstrates how this option can be used to find potential\n issues.\n\nReferences\n\nCopyright\n\nThis document is placed in the public domain or under the\nCC0-1.0-Universal license, whichever is more permissive.\n\n[1] \"Packages can't be installed when encoding is not UTF-8\"\n(https://github.com/methane/pep597-pypi-ascii)\n\n[2] \"Logging - Inconsistent behaviour when handling unicode\"\n(https://bugs.python.org/issue37111)\n\n[3] Packaging tutorial in packaging.python.org didn't specify encoding\nto read a README.md\n(https://github.com/pypa/packaging.python.org/pull/682)\n\n[4] json.tool had used locale encoding to read JSON files.\n(https://bugs.python.org/issue33684)\n\n[5] site: Potential UnicodeDecodeError when handling pth file\n(https://bugs.python.org/issue33684)\n\n[6] pypa/pip: \"Installing packages fails if Python 3 installed into path\nwith non-ASCII characters\" (https://github.com/pypa/pip/issues/9054)\n\n[7] \"site: Potential UnicodeDecodeError when handling pth file\"\n(https://bugs.python.org/issue43214)\n\n[8] \"[pypa/pip] Use encoding option or binary mode for open()\"\n(https://github.com/pypa/pip/pull/9608)\n\n[9] \"Possible UnicodeError caused by missing encoding=\"utf-8\"\"\n(https://github.com/tox-dev/tox/issues/1908)"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.066600"},"created":{"kind":"timestamp","value":"2019-06-05T00:00:00","string":"2019-06-05T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0597/\",\n \"authors\": [\n \"Inada Naoki\"\n ],\n \"pep_number\": \"0597\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":594,"cells":{"id":{"kind":"string","value":"0548"},"text":{"kind":"string","value":"PEP: 548 Title: More Flexible Loop Control Version: $Revision$\nLast-Modified: $Date$ Author: R David Murray Status: Rejected Type:\nStandards Track Content-Type: text/x-rst Created: 05-Sep-2017\nPython-Version: 3.7 Post-History: 05-Aug-2017\n\nRejection Note\n\nRejection by Guido:\nhttps://mail.python.org/pipermail/python-dev/2017-September/149232.html\n\nAbstract\n\nThis PEP proposes enhancing the break and continue statements with an\noptional boolean expression that controls whether or not they execute.\nThis allows the flow of control in loops to be expressed more clearly\nand compactly.\n\nMotivation\n\nQuoting from the rejected PEP 315:\n\n It is often necessary for some code to be executed before each\n evaluation of the while loop condition. This code is often duplicated\n outside the loop, as setup code that executes once before entering the\n loop:\n\n \n while :\n \n \n\nThat PEP was rejected because no syntax was found that was superior to\nthe following form:\n\n while True:\n \n if not :\n break\n \n\nThis PEP proposes a superior form, one that also has application to for\nloops. 
It is superior because it makes the flow of control in loops more\nexplicit, while preserving Python's indentation aesthetic.\n\nSyntax\n\nThe syntax of the break and continue statements are extended as follows:\n\n break_stmt : \"break\" [\"if\" expression]\n continue_stmt : \"continue\" [\"if\" expression]\n\nIn addition, the syntax of the while statement is modified as follows:\n\n while_stmt : while1_stmt|while2_stmt\n while1_stmt : \"while\" expression \":\" suite\n [\"else\" \":\" suite]\n while2_stmt : \"while\" \":\" suite\n\nSemantics\n\nA break if or continue if is executed if and only if expression\nevaluates to true.\n\nA while statement with no expression loops until a break or return is\nexecuted (or an error is raised), as if it were a while True statement.\nGiven that the loop can never terminate except in a way that would not\ncause an else suite to execute, no else suite is allowed in the\nexpressionless form. If practical, it should also be an error if the\nbody of an expressionless while does not contain at least one break or\nreturn statement.\n\nJustification and Examples\n\nThe previous \"best possible\" form:\n\n while True:\n \n if not :\n break\n \n\ncould be formatted as:\n\n while True:\n \n if not : break\n \n\nThis is superficially almost identical to the form proposed by this PEP:\n\n while:\n \n break if not \n \n\nThe significant difference here is that the loop flow control keyword\nappears first in the line of code. This makes it easier to comprehend\nthe flow of control in the loop at a glance, especially when reading\ncolorized code.\n\nFor example, this is a common code pattern, taken in this case from the\ntarfile module:\n\n while True:\n buf = self._read(self.bufsize)\n if not buf:\n break\n t.append(buf)\n\nReading this, we either see the break and possibly need to think about\nwhere the while is that it applies to, since the break is indented under\nthe if, and then track backward to read the condition that triggers it;\nor, we read the condition and only afterward discover that this\ncondition changes the flow of the loop.\n\nWith the new syntax this becomes:\n\n while:\n buf = self._read(self.bufsize)\n break if not buf\n t.append(buf)\n\nReading this we first see the break, which obviously applies to the\nwhile since it is at the same level of indentation as the loop body, and\nthen we read the condition that causes the flow of control to change.\n\nFurther, consider a more complex example from sre_parse:\n\n while True:\n c = self.next\n self.__next()\n if c is None:\n if not result:\n raise self.error(\"missing group name\")\n raise self.error(\"missing %s, unterminated name\" % terminator,\n len(result))\n if c == terminator:\n if not result:\n raise self.error(\"missing group name\", 1)\n break\n result += c\n return result\n\nThis is the natural way to write this code given current Python loop\ncontrol syntax. However, given break if, it would be more natural to\nwrite this as follows:\n\n while:\n c = self.next\n self.__next()\n break if c is None or c == terminator\n result += c\n if not result:\n raise self.error(\"missing group name\")\n elif c is None:\n raise self.error(\"missing %s, unterminated name\" % terminator,\n len(result))\n return result\n\nThis form moves the error handling out of the loop body, leaving the\nloop logic much more understandable. 
While it would certainly be\npossible to write the code this way using the current syntax, the\nproposed syntax makes it more natural to write it in the clearer form.\n\nThe proposed syntax also provides a natural, Pythonic spelling of the\nclassic repeat ... until construct found in other\nlanguages, and for which no good syntax has previously been found for\nPython:\n\n while:\n ...\n break if \n\nThe tarfile module, for example, has a couple of \"read until\" loops like\nthe following:\n\n while True:\n s = self.__read(1)\n if not s or s == NUL:\n break\n\nWith the new syntax this would read more clearly:\n\n while:\n s = self.__read(1)\n break if not s or s == NUL\n\nThe case for extending this syntax to continue is less strong, but\nbuttressed by the value of consistency.\n\nIt is much more common for a continue statement to be at the end of a\nmultiline if suite, such as this example from zipfile :\n\n while True:\n try:\n self.fp = io.open(file, filemode)\n except OSError:\n if filemode in modeDict:\n filemode = modeDict[filemode]\n continue\n raise\n break\n\nThe only opportunity for improvement the new syntax would offer for this\nloop would be the omission of the True token.\n\nOn the other hand, consider this example from uuid.py:\n\n for i in range(adapters.length):\n ncb.Reset()\n ncb.Command = netbios.NCBRESET\n ncb.Lana_num = ord(adapters.lana[i])\n if win32wnet.Netbios(ncb) != 0:\n continue\n ncb.Reset()\n ncb.Command = netbios.NCBASTAT\n ncb.Lana_num = ord(adapters.lana[i])\n ncb.Callname = '*'.ljust(16)\n ncb.Buffer = status = netbios.ADAPTER_STATUS()\n if win32wnet.Netbios(ncb) != 0:\n continue\n status._unpack()\n bytes = status.adapter_address[:6]\n if len(bytes) != 6:\n continue\n return int.from_bytes(bytes, 'big')\n\nThis becomes:\n\n for i in range(adapters.length):\n ncb.Reset()\n ncb.Command = netbios.NCBRESET\n ncb.Lana_num = ord(adapters.lana[i])\n continue if win32wnet.Netbios(ncb) != 0\n ncb.Reset()\n ncb.Command = netbios.NCBASTAT\n ncb.Lana_num = ord(adapters.lana[i])\n ncb.Callname = '*'.ljust(16)\n ncb.Buffer = status = netbios.ADAPTER_STATUS()\n continue if win32wnet.Netbios(ncb) != 0\n status._unpack()\n bytes = status.adapter_address[:6]\n continue if len(bytes) != 6\n return int.from_bytes(bytes, 'big')\n\nThis example indicates that there are non-trivial use cases where\ncontinue if also improves the readability of the loop code.\n\nIt is probably significant to note that all of the examples selected for\nthis PEP were found by grepping the standard library for while True and\ncontinue, and the relevant examples were found in the first four modules\ninspected.\n\nCopyright\n\nThis document is placed in the public domain."},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.074898"},"created":{"kind":"timestamp","value":"2017-09-05T00:00:00","string":"2017-09-05T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0548/\",\n \"authors\": [\n \"R David Murray\"\n ],\n \"pep_number\": \"0548\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":595,"cells":{"id":{"kind":"string","value":"0455"},"text":{"kind":"string","value":"PEP: 455 Title: Adding a key-transforming dictionary to collections\nVersion: $Revision$ Last-Modified: $Date$ Author: Antoine Pitrou\n BDFL-Delegate: Raymond Hettinger Status: Rejected\nType: Standards Track Content-Type: text/x-rst Created: 13-Sep-2013\nPython-Version: 3.5 Post-History:\n\nAbstract\n\nThis PEP 
proposes a new data structure for the collections module,\ncalled \"TransformDict\" in this PEP. This structure is a mutable mapping\nwhich transforms the key using a given function when doing a lookup, but\nretains the original key when reading.\n\nRejection\n\nSee the rationale at\nhttps://mail.python.org/pipermail/python-dev/2015-May/140003.html and\nfor an earlier partial review, see\nhttps://mail.python.org/pipermail/python-dev/2013-October/129937.html .\n\nRationale\n\nNumerous specialized versions of this pattern exist. The most common is\na case-insensitive case-preserving dict, i.e. a dict-like container\nwhich matches keys in a case-insensitive fashion but retains the\noriginal casing. It is a very common need in network programming, as\nmany protocols feature some arrays of \"key / value\" properties in their\nmessages, where the keys are textual strings whose case is specified to\nbe ignored on receipt but by either specification or custom is to be\npreserved or non-trivially canonicalized when retransmitted.\n\nAnother common request is an identity dict, where keys are matched\naccording to their respective id()s instead of normal matching.\n\nBoth are instances of a more general pattern, where a given\ntransformation function is applied to keys when looking them up: that\nfunction being str.lower or str.casefold in the former example and the\nbuilt-in id function in the latter.\n\n(It could be said that the pattern projects keys from the user-visible\nset onto the internal lookup set.)\n\nSemantics\n\nTransformDict is a MutableMapping implementation: it faithfully\nimplements the well-known API of mutable mappings, like dict itself and\nother dict-like classes in the standard library. Therefore, this PEP\nwon't rehash the semantics of most TransformDict methods.\n\nThe transformation function needn't be bijective, it can be strictly\nsurjective as in the case-insensitive example (in other words, different\nkeys can lookup the same value):\n\n >>> d = TransformDict(str.casefold)\n >>> d['SomeKey'] = 5\n >>> d['somekey']\n 5\n >>> d['SOMEKEY']\n 5\n\nTransformDict retains the first key used when creating an entry:\n\n >>> d = TransformDict(str.casefold)\n >>> d['SomeKey'] = 1\n >>> d['somekey'] = 2\n >>> list(d.items())\n [('SomeKey', 2)]\n\nThe original keys needn't be hashable, as long as the transformation\nfunction returns a hashable one:\n\n >>> d = TransformDict(id)\n >>> l = [None]\n >>> d[l] = 5\n >>> l in d\n True\n\nConstructor\n\nAs shown in the examples above, creating a TransformDict requires\npassing the key transformation function as the first argument (much like\ncreating a defaultdict requires passing the factory function as first\nargument).\n\nThe constructor also takes other optional arguments which can be used to\ninitialize the TransformDict with certain key-value pairs. 
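Since TransformDict was never added to the standard library, the\nfollowing is a minimal pure-Python sketch of the semantics described so\nfar (it is not the reference implementation from the tracker issue, and\nit omits niceties such as repr() and copy()); note that the constructor\ntakes the transformation function first, followed by the usual optional\ninitializers:\n\n    from collections.abc import MutableMapping\n\n    class TransformDict(MutableMapping):\n        # Internal storage maps transformed key -> (original key, value).\n\n        def __init__(self, transform_func, init=(), **kwargs):\n            self._transform = transform_func\n            self._data = {}\n            self.update(init, **kwargs)\n\n        @property\n        def transform_func(self):\n            return self._transform\n\n        def getitem(self, key):\n            # Return the (stored_key, value) pair for *key*.\n            try:\n                return self._data[self._transform(key)]\n            except KeyError:\n                raise KeyError(key) from None\n\n        def __getitem__(self, key):\n            return self.getitem(key)[1]\n\n        def __setitem__(self, key, value):\n            tkey = self._transform(key)\n            stored = self._data.get(tkey)\n            # Retain the first original key used for this entry.\n            self._data[tkey] = (key if stored is None else stored[0], value)\n\n        def __delitem__(self, key):\n            try:\n                del self._data[self._transform(key)]\n            except KeyError:\n                raise KeyError(key) from None\n\n        def __len__(self):\n            return len(self._data)\n\n        def __iter__(self):\n            return (original for original, value in self._data.values())\n\nWith this sketch, the two most commonly requested specializations are\none-liners: TransformDict(str.casefold) for a case-insensitive dict and\nTransformDict(id) for an identity dict.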
Those\noptional arguments are the same as in the dict and defaultdict\nconstructors:\n\n >>> d = TransformDict(str.casefold, [('Foo', 1)], Bar=2)\n >>> sorted(d.items())\n [('Bar', 2), ('Foo', 1)]\n\nGetting the original key\n\nTransformDict also features a lookup method returning the stored key\ntogether with the corresponding value:\n\n >>> d = TransformDict(str.casefold, {'Foo': 1})\n >>> d.getitem('FOO')\n ('Foo', 1)\n >>> d.getitem('bar')\n Traceback (most recent call last):\n File \"\", line 1, in \n KeyError: 'bar'\n\nThe method name getitem() follows the standard popitem() method on\nmutable mappings.\n\nGetting the transformation function\n\nTransformDict has a simple read-only property transform_func which gives\nback the transformation function.\n\nAlternative proposals and questions\n\nRetaining the last original key\n\nMost python-dev respondents found retaining the first user-supplied key\nmore intuitive than retaining the last. Also, it matches the dict\nobject's own behaviour when using different but equal keys:\n\n >>> d = {}\n >>> d[1] = 'hello'\n >>> d[1.0] = 'world'\n >>> d\n {1: 'world'}\n\nFurthermore, explicitly retaining the last key in a first-key-retaining\nscheme is still possible using the following approach:\n\n d.pop(key, None)\n d[key] = value\n\nwhile the converse (retaining the first key in a last-key-retaining\nscheme) doesn't look possible without rewriting part of the container's\ncode.\n\nUsing an encoder / decoder pair\n\nUsing a function pair isn't necessary, since the original key is\nretained by the container. Moreover, an encoder / decoder pair would\nrequire the transformation to be bijective, which prevents important use\ncases like case-insensitive matching.\n\nProviding a transformation function for values\n\nDictionary values are not used for lookup, their semantics are totally\nirrelevant to the container's operation. Therefore, there is no point in\nhaving both an \"original\" and a \"transformed\" value: the transformed\nvalue wouldn't be used for anything.\n\nProviding a specialized container, not generic\n\nIt was asked why we would provide the generic TransformDict construct\nrather than a specialized case-insensitive dict variant. 
The answer is\nthat it's nearly as cheap (code-wise and performance-wise) to provide\nthe generic construct, and it can fill more use cases.\n\nEven case-insensitive dicts can actually elicit different transformation\nfunctions: str.lower, str.casefold or in some cases bytes.lower when\nworking with text encoded in an ASCII-compatible encoding.\n\nOther constructor patterns\n\nTwo other constructor patterns were proposed by Serhiy Storchaka:\n\n- A type factory scheme:\n\n d = TransformDict(str.casefold)(Foo=1)\n\n- A subclassing scheme:\n\n class CaseInsensitiveDict(TransformDict):\n __transform__ = str.casefold\n\n d = CaseInsensitiveDict(Foo=1)\n\nWhile both approaches can be defended, they don't follow established\npractices in the standard library, and therefore were rejected.\n\nImplementation\n\nA patch for the collections module is tracked on the bug tracker at\nhttp://bugs.python.org/issue18986.\n\nExisting work\n\nCase-insensitive dicts are a popular request:\n\n- http://twistedmatrix.com/documents/current/api/twisted.python.util.InsensitiveDict.html\n- https://mail.python.org/pipermail/python-list/2013-May/647243.html\n- https://mail.python.org/pipermail/python-list/2005-April/296208.html\n- https://mail.python.org/pipermail/python-list/2004-June/241748.html\n- http://bugs.python.org/msg197376\n- http://stackoverflow.com/a/2082169\n- http://stackoverflow.com/a/3296782\n- http://code.activestate.com/recipes/66315-case-insensitive-dictionary/\n- https://gist.github.com/babakness/3901174\n- http://www.wikier.org/blog/key-insensitive-dictionary-in-python\n- http://en.sharejs.com/python/14534\n- http://www.voidspace.org.uk/python/archive.shtml#caseless\n\nIdentity dicts have been requested too:\n\n- https://mail.python.org/pipermail/python-ideas/2010-May/007235.html\n- http://www.gossamer-threads.com/lists/python/python/209527\n\nSeveral modules in the standard library use identity lookups for object\nmemoization, for example pickle, json, copy, cProfile, doctest and\n_threading_local.\n\nOther languages\n\nC# / .Net\n\n.Net has a generic Dictionary class where you can specify a custom\nIEqualityComparer: http://msdn.microsoft.com/en-us/library/xfhwa508.aspx\n\nUsing it is the recommended way to write case-insensitive dictionaries:\nhttp://stackoverflow.com/questions/13230414/case-insensitive-access-for-generic-dictionary\n\nJava\n\nJava has a specialized CaseInsensitiveMap:\nhttp://commons.apache.org/proper/commons-collections/apidocs/org/apache/commons/collections4/map/CaseInsensitiveMap.html\n\nIt also has a separate IdentityHashMap:\nhttp://docs.oracle.com/javase/6/docs/api/java/util/IdentityHashMap.html\n\nC++\n\nThe C++ Standard Template Library features an unordered_map with\ncustomizable hash and equality functions:\nhttp://www.cplusplus.com/reference/unordered_map/unordered_map/\n\nCopyright\n\nThis document has been placed in the public domain.\n\n\f\n\n Local Variables: mode: indented-text indent-tabs-mode: nil\n sentence-end-double-space: t fill-column: 70 coding: utf-8 End:"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.087549"},"created":{"kind":"timestamp","value":"2013-09-13T00:00:00","string":"2013-09-13T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0455/\",\n \"authors\": [\n \"Antoine Pitrou\"\n ],\n \"pep_number\": \"0455\",\n \"pandoc_version\": 
\"3.5\"\n}"}}},{"rowIdx":596,"cells":{"id":{"kind":"string","value":"0377"},"text":{"kind":"string","value":"PEP: 377 Title: Allow __enter__() methods to skip the statement body\nVersion: $Revision$ Last-Modified: $Date$ Author: Alyssa Coghlan\n Status: Rejected Type: Standards Track\nContent-Type: text/x-rst Created: 08-Mar-2009 Python-Version: 2.7, 3.1\nPost-History: 08-Mar-2009\n\nAbstract\n\nThis PEP proposes a backwards compatible mechanism that allows\n__enter__() methods to skip the body of the associated with statement.\nThe lack of this ability currently means the contextlib.contextmanager\ndecorator is unable to fulfil its specification of being able to turn\narbitrary code into a context manager by moving it into a generator\nfunction with a yield in the appropriate location. One symptom of this\nis that contextlib.nested will currently raise RuntimeError in\nsituations where writing out the corresponding nested with statements\nwould not[1].\n\nThe proposed change is to introduce a new flow control exception\nSkipStatement, and skip the execution of the with statement body if\n__enter__() raises this exception.\n\nPEP Rejection\n\nThis PEP was rejected by Guido[2] as it imposes too great an increase in\ncomplexity without a proportional increase in expressiveness and\ncorrectness. In the absence of compelling use cases that need the more\ncomplex semantics proposed by this PEP the existing behaviour is\nconsidered acceptable.\n\nProposed Change\n\nThe semantics of the with statement will be changed to include a new\ntry/except/else block around the call to __enter__(). If SkipStatement\nis raised by the __enter__() method, then the main section of the with\nstatement (now located in the else clause) will not be executed. To\navoid leaving the names in any as clause unbound in this case, a new\nStatementSkipped singleton (similar to the existing NotImplemented\nsingleton) will be assigned to all names that appear in the as clause.\n\nThe components of the with statement remain as described in PEP 343:\n\n with EXPR as VAR:\n BLOCK\n\nAfter the modification, the with statement semantics would be as\nfollows:\n\n mgr = (EXPR)\n exit = mgr.__exit__ # Not calling it yet\n try:\n value = mgr.__enter__()\n except SkipStatement:\n VAR = StatementSkipped\n # Only if \"as VAR\" is present and\n # VAR is a single name\n # If VAR is a tuple of names, then StatementSkipped\n # will be assigned to each name in the tuple\n else:\n exc = True\n try:\n try:\n VAR = value # Only if \"as VAR\" is present\n BLOCK\n except:\n # The exceptional case is handled here\n exc = False\n if not exit(*sys.exc_info()):\n raise\n # The exception is swallowed if exit() returns true\n finally:\n # The normal and non-local-goto cases are handled here\n if exc:\n exit(None, None, None)\n\nWith the above change in place for the with statement semantics,\ncontextlib.contextmanager() will then be modified to raise SkipStatement\ninstead of RuntimeError when the underlying generator doesn't yield.\n\nRationale for Change\n\nCurrently, some apparently innocuous context managers may raise\nRuntimeError when executed. This occurs when the context manager's\n__enter__() method encounters a situation where the written out version\nof the code corresponding to the context manager would skip the code\nthat is now the body of the with statement. 
Since the __enter__() method\nhas no mechanism available to signal this to the interpreter, it is\ninstead forced to raise an exception that not only skips the body of the\nwith statement, but also jumps over all code until the nearest exception\nhandler. This goes against one of the design goals of the with\nstatement, which was to be able to factor out arbitrary common exception\nhandling code into a single context manager by putting into a generator\nfunction and replacing the variant part of the code with a yield\nstatement.\n\nSpecifically, the following examples behave differently if\ncmB().__enter__() raises an exception which cmA().__exit__() then\nhandles and suppresses:\n\n with cmA():\n with cmB():\n do_stuff()\n # This will resume here without executing \"do_stuff()\"\n\n @contextlib.contextmanager\n def combined():\n with cmA():\n with cmB():\n yield\n\n with combined():\n do_stuff()\n # This will raise a RuntimeError complaining that the context\n # manager's underlying generator didn't yield\n\n with contextlib.nested(cmA(), cmB()):\n do_stuff()\n # This will raise the same RuntimeError as the contextmanager()\n # example (unsurprising, given that the nested() implementation\n # uses contextmanager())\n\n # The following class based version shows that the issue isn't\n # specific to contextlib.contextmanager() (it also shows how\n # much simpler it is to write context managers as generators\n # instead of as classes!)\n class CM(object):\n def __init__(self):\n self.cmA = None\n self.cmB = None\n\n def __enter__(self):\n if self.cmA is not None:\n raise RuntimeError(\"Can't re-use this CM\")\n self.cmA = cmA()\n self.cmA.__enter__()\n try:\n self.cmB = cmB()\n self.cmB.__enter__()\n except:\n self.cmA.__exit__(*sys.exc_info())\n # Can't suppress in __enter__(), so must raise\n raise\n\n def __exit__(self, *args):\n suppress = False\n try:\n if self.cmB is not None:\n suppress = self.cmB.__exit__(*args)\n except:\n suppress = self.cmA.__exit__(*sys.exc_info()):\n if not suppress:\n # Exception has changed, so reraise explicitly\n raise\n else:\n if suppress:\n # cmB already suppressed the exception,\n # so don't pass it to cmA\n suppress = self.cmA.__exit__(None, None, None):\n else:\n suppress = self.cmA.__exit__(*args):\n return suppress\n\nWith the proposed semantic change in place, the contextlib based\nexamples above would then \"just work\", but the class based version would\nneed a small adjustment to take advantage of the new semantics:\n\n class CM(object):\n def __init__(self):\n self.cmA = None\n self.cmB = None\n\n def __enter__(self):\n if self.cmA is not None:\n raise RuntimeError(\"Can't re-use this CM\")\n self.cmA = cmA()\n self.cmA.__enter__()\n try:\n self.cmB = cmB()\n self.cmB.__enter__()\n except:\n if self.cmA.__exit__(*sys.exc_info()):\n # Suppress the exception, but don't run\n # the body of the with statement either\n raise SkipStatement\n raise\n\n def __exit__(self, *args):\n suppress = False\n try:\n if self.cmB is not None:\n suppress = self.cmB.__exit__(*args)\n except:\n suppress = self.cmA.__exit__(*sys.exc_info()):\n if not suppress:\n # Exception has changed, so reraise explicitly\n raise\n else:\n if suppress:\n # cmB already suppressed the exception,\n # so don't pass it to cmA\n suppress = self.cmA.__exit__(None, None, None):\n else:\n suppress = self.cmA.__exit__(*args):\n return suppress\n\nThere is currently a tentative suggestion[3] to add import-style syntax\nto the with statement to allow multiple context managers to be included\nin 
a single with statement without needing to use contextlib.nested. In\nthat case the compiler has the option of simply emitting multiple with\nstatements at the AST level, thus allowing the semantics of actual\nnested with statements to be reproduced accurately. However, such a\nchange would highlight rather than alleviate the problem the current PEP\naims to address: it would not be possible to use\ncontextlib.contextmanager to reliably factor out such with statements,\nas they would exhibit exactly the same semantic differences as are seen\nwith the combined() context manager in the above example.\n\nPerformance Impact\n\nImplementing the new semantics makes it necessary to store the\nreferences to the __enter__ and __exit__ methods in temporary variables\ninstead of on the stack. This results in a slight regression in with\nstatement speed relative to Python 2.6/3.1. However, implementing a\ncustom SETUP_WITH opcode would negate any differences between the two\napproaches (as well as dramatically improving speed by eliminating more\nthan a dozen unnecessary trips around the eval loop).\n\nReference Implementation\n\nPatch attached to Issue 5251[4]. That patch uses only existing opcodes\n(i.e. no SETUP_WITH).\n\nAcknowledgements\n\nJames William Pye both raised the issue and suggested the basic outline\nof the solution described in this PEP.\n\nReferences\n\nCopyright\n\nThis document has been placed in the public domain.\n\n[1] Issue 5251: contextlib.nested inconsistent with nested with\nstatements (http://bugs.python.org/issue5251)\n\n[2] Guido's rejection of the PEP\n(https://mail.python.org/pipermail/python-dev/2009-March/087263.html)\n\n[3] Import-style syntax to reduce indentation of nested with statements\n(https://mail.python.org/pipermail/python-ideas/2009-March/003188.html)\n\n[4] Issue 5251: contextlib.nested inconsistent with nested with\nstatements (http://bugs.python.org/issue5251)"},"source":{"kind":"string","value":"python-peps"},"added":{"kind":"string","value":"2024-10-18T13:23:34.097952"},"created":{"kind":"timestamp","value":"2009-03-08T00:00:00","string":"2009-03-08T00:00:00"},"metadata":{"kind":"string","value":"{\n \"license\": \"Public Domain\",\n \"url\": \"https://peps.python.org/pep-0377/\",\n \"authors\": [\n \"Alyssa Coghlan\"\n ],\n \"pep_number\": \"0377\",\n \"pandoc_version\": \"3.5\"\n}"}}},{"rowIdx":597,"cells":{"id":{"kind":"string","value":"0349"},"text":{"kind":"string","value":"PEP: 349 Title: Allow str() to return unicode strings Version:\n$Revision$ Last-Modified: $Date$ Author: Neil Schemenauer\n Status: Rejected Type: Standards Track Content-Type:\ntext/x-rst Created: 02-Aug-2005 Python-Version: 2.5 Post-History:\n06-Aug-2005 Resolution:\nhttps://mail.python.org/archives/list/python-dev@python.org/message/M2Y3PUFLAE23NPRJPVBYF6P5LW5LVN6F/\n\nAbstract\n\nThis PEP proposes to change the str() built-in function so that it can\nreturn unicode strings. This change would make it easier to write code\nthat works with either string type and would also make some existing\ncode handle unicode strings. The C function PyObject_Str() would remain\nunchanged and the function PyString_New() would be added instead.\n\nRationale\n\nPython has had a Unicode string type for some time now but use of it is\nnot yet widespread. There is a large amount of Python code that assumes\nthat string data is represented as str instances. 
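A small, hypothetical Python 2 session shows the kind of friction that\nappears when unicode data reaches such code:\n\n    >>> def tag(s):\n    ...     return '<b>' + str(s) + '</b>'  # coerces via str()\n    ...\n    >>> tag('hello')\n    '<b>hello</b>'\n    >>> tag(u'caf\xe9')\n    Traceback (most recent call last):\n      ...\n    UnicodeEncodeError: 'ascii' codec can't encode character u'\xe9' in position 3: ordinal not in range(128)\n\nUnder the change proposed below, str() would simply return the unicode\ninstance unchanged, and tag() would work for both string types.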
The long-term plan for\nPython is to phase out the str type and use unicode for all string data.\nClearly, a smooth migration path must be provided.\n\nWe need to upgrade existing libraries, written for str instances, to be\nmade capable of operating in an all-unicode string world. We can't\nchange to an all-unicode world until all essential libraries are made\ncapable for it. Upgrading the libraries in one shot does not seem\nfeasible. A more realistic strategy is to individually make the\nlibraries capable of operating on unicode strings while preserving their\ncurrent all-str environment behaviour.\n\nFirst, we need to be able to write code that can accept unicode\ninstances without attempting to coerce them to str instances. Let us\nlabel such code as Unicode-safe. Unicode-safe libraries can be used in\nan all-unicode world.\n\nSecond, we need to be able to write code that, when provided only str\ninstances, will not create unicode results. Let us label such code as\nstr-stable. Libraries that are str-stable can be used by libraries and\napplications that are not yet Unicode-safe.\n\nSometimes it is simple to write code that is both str-stable and\nUnicode-safe. For example, the following function just works:\n\n def appendx(s):\n return s + 'x'\n\nThat's not too surprising since the unicode type is designed to make the\ntask easier. The principle is that when str and unicode instances meet,\nthe result is a unicode instance. One notable difficulty arises when\ncode requires a string representation of an object; an operation\ntraditionally accomplished by using the str() built-in function.\n\nUsing the current str() function makes the code not Unicode-safe.\nReplacing a str() call with a unicode() call makes the code not\nstr-stable. Changing str() so that it could return unicode instances\nwould solve this problem. As a further benefit, some code that is\ncurrently not Unicode-safe because it uses str() would become\nUnicode-safe.\n\nSpecification\n\nA Python implementation of the str() built-in follows:\n\n def str(s):\n \"\"\"Return a nice string representation of the object. The\n return value is a str or unicode instance.\n \"\"\"\n if type(s) is str or type(s) is unicode:\n return s\n r = s.__str__()\n if not isinstance(r, (str, unicode)):\n raise TypeError('__str__ returned non-string')\n return r\n\nThe following function would be added to the C API and would be the\nequivalent to the str() built-in (ideally it be called PyObject_Str, but\nchanging that function could cause a massive number of compatibility\nproblems):\n\n PyObject *PyString_New(PyObject *);\n\nA reference implementation is available on Sourceforge[1] as a patch.\n\nBackwards Compatibility\n\nSome code may require that str() returns a str instance. In the standard\nlibrary, only one such case has been found so far. The function\nemail.header_decode() requires a str instance and the\nemail.Header.decode_header() function tries to ensure this by calling\nstr() on its argument. The code was fixed by changing the line \"header =\nstr(header)\" to:\n\n if isinstance(header, unicode):\n header = header.encode('ascii')\n\nWhether this is truly a bug is questionable since decode_header() really\noperates on byte strings, not character strings. Code that passes it a\nunicode instance could itself be considered buggy.\n\nAlternative Solutions\n\nA new built-in function could be added instead of changing str(). 
PEP: 307
Title: Extensions to the pickle protocol
Version: $Revision$
Last-Modified: $Date$
Author: Guido van Rossum, Tim Peters
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 31-Jan-2003
Python-Version: 2.3
Post-History: 07-Feb-2003

Introduction

Pickling new-style objects in Python 2.2 is done somewhat clumsily and causes pickle size to bloat compared to classic class instances. This PEP documents a new pickle protocol in Python 2.3 that takes care of this and many other pickle issues.

There are two sides to specifying a new pickle protocol: the byte stream constituting pickled data must be specified, and the interface between objects and the pickling and unpickling engines must be specified. This PEP focuses on API issues, although it may occasionally touch on byte stream format details to motivate a choice. The pickle byte stream format is documented formally by the standard library module pickletools.py (already checked into CVS for Python 2.3).

This PEP attempts to fully document the interface between pickled objects and the pickling process, highlighting additions by specifying "new in this PEP". (The interface to invoke pickling or unpickling is not covered fully, except for the changes to the API for specifying the pickling protocol to picklers.)

Motivation

Pickling new-style objects causes serious pickle bloat. For example:

    import pickle

    class C(object):  # Omit "(object)" for classic class
        pass

    x = C()
    x.foo = 42
    print len(pickle.dumps(x, 1))

The binary pickle for the classic object consumed 33 bytes, and for the new-style object 86 bytes.

The reasons for the bloat are complex, but are mostly caused by the fact that new-style objects use __reduce__ in order to be picklable at all. After ample consideration we've concluded that the only way to reduce pickle sizes for new-style objects is to add new opcodes to the pickle protocol. The net result is that with the new protocol, the pickle size in the above example is 35 (two extra bytes are used at the start to indicate the protocol version, although this isn't strictly necessary).

Protocol versions

Previously, pickling (but not unpickling) distinguished between text mode and binary mode. By design, binary mode is a superset of text mode, and unpicklers don't need to know in advance whether an incoming pickle uses text mode or binary mode. The virtual machine used for unpickling is the same regardless of the mode; certain opcodes simply aren't used in text mode.

Retroactively, text mode is now called protocol 0, and binary mode protocol 1. The new protocol is called protocol 2. In the tradition of pickling protocols, protocol 2 is a superset of protocol 1. But just so that future pickling protocols aren't required to be supersets of the oldest protocols, a new opcode is inserted at the start of a protocol 2 pickle indicating that it is using protocol 2.

To date, each release of Python has been able to read pickles written by all previous releases. Of course pickles written under protocol N can't be read by versions of Python earlier than the one that introduced protocol N.

Several functions, methods and constructors used for pickling used to take a positional argument named 'bin' which was a flag, defaulting to 0, indicating binary mode. This argument is renamed to 'protocol' and now gives the protocol number, still defaulting to 0. It so happens that passing 2 for the 'bin' argument in previous Python versions had the same effect as passing 1. Nevertheless, a special case is added here: passing a negative number selects the highest protocol version supported by a particular implementation. This works in previous Python versions, too, and so can be used to select the highest protocol available in a way that's both backward and forward compatible. In addition, a new module constant HIGHEST_PROTOCOL is supplied by both pickle and cPickle, equal to the highest protocol number the module can read. This is cleaner than passing -1, but cannot be used before Python 2.3.
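As a minimal sketch (not part of the PEP itself) of how a program might select the protocol in a way that works both before and after 2.3:

    import pickle

    data = {'spam': 1, 'eggs': 2}

    # A negative protocol number selects the highest protocol the
    # implementation supports; this works on older Pythons too.
    s = pickle.dumps(data, -1)

    # From Python 2.3 on, the new module constant can be used instead.
    try:
        best = pickle.HIGHEST_PROTOCOL
    except AttributeError:          # pickle module older than 2.3
        best = -1
    s = pickle.dumps(data, best)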
The pickle.py module has supported passing the 'bin' value as a keyword argument rather than a positional argument. (This is not recommended, since cPickle only accepts positional arguments, but it works...) Passing 'bin' as a keyword argument is deprecated, and a PendingDeprecationWarning is issued in this case. You have to invoke the Python interpreter with -Wa or a variation on that to see PendingDeprecationWarning messages. In Python 2.4, the warning class may be upgraded to DeprecationWarning.

Security issues

In previous versions of Python, unpickling would do a "safety check" on certain operations, refusing to call functions or constructors that weren't marked as "safe for unpickling" by either having an attribute __safe_for_unpickling__ set to 1, or by being registered in a global registry, copy_reg.safe_constructors.

This feature gives a false sense of security: nobody has ever done the necessary, extensive, code audit to prove that unpickling untrusted pickles cannot invoke unwanted code, and in fact bugs in the Python 2.2 pickle.py module make it easy to circumvent these security measures.

We firmly believe that, on the Internet, it is better to know that you are using an insecure protocol than to trust a protocol to be secure whose implementation hasn't been thoroughly checked. Even high quality implementations of widely used protocols are routinely found flawed; Python's pickle implementation simply cannot make such guarantees without a much larger time investment. Therefore, as of Python 2.3, all safety checks on unpickling are officially removed, and replaced with this warning:

    Warning: Do not unpickle data received from an untrusted or
    unauthenticated source.

The same warning applies to previous Python versions, despite the presence of safety checks there.

Extended __reduce__ API

There are several APIs that a class can use to control pickling. Perhaps the most popular of these are __getstate__ and __setstate__; but the most powerful one is __reduce__. (There's also __getinitargs__, and we're adding __getnewargs__ below.)

There are several ways to provide __reduce__ functionality: a class can implement a __reduce__ method or a __reduce_ex__ method (see next section), or a reduce function can be declared in copy_reg (copy_reg.dispatch_table maps classes to functions). The return values are interpreted exactly the same, though, and we'll refer to these collectively as __reduce__.
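To make the dispatch-table route concrete, here is a hedged sketch (the Point class and point_reducer function are invented for illustration); copy_reg.pickle() registers the reduce function in copy_reg.dispatch_table:

    import copy_reg

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def point_reducer(p):
        # Same return convention as __reduce__: (callable, arguments).
        return (Point, (p.x, p.y))

    # Registers point_reducer in the dispatch table for Point.
    copy_reg.pickle(Point, point_reducer)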
Important: pickling of classic class instances does not look for a __reduce__ or __reduce_ex__ method or a reduce function in the copy_reg dispatch table, so that a classic class cannot provide __reduce__ functionality in the sense intended here. A classic class must use __getinitargs__ and/or __getstate__ to customize pickling. These are described below.

__reduce__ must return either a string or a tuple. If it returns a string, this is an object whose state is not to be pickled, but instead a reference to an equivalent object referenced by name. The string returned by __reduce__ should be the object's local name (relative to its module); the pickle module searches the module namespace to determine the object's module.

The rest of this section is concerned with the tuple returned by __reduce__. It is a variable size tuple, of length 2 through 5. The first two items (function and arguments) are required. The remaining items are optional and may be left off from the end; giving None for the value of an optional item acts the same as leaving it off. The last two items are new in this PEP. The items are, in order:

  function    Required.

              A callable object (not necessarily a function) called
              to create the initial version of the object; state may
              be added to the object later to fully reconstruct the
              pickled state. This function must itself be picklable.
              See the section about __newobj__ for a special case
              (new in this PEP) here.

  arguments   Required.

              A tuple giving the argument list for the function. As
              a special case, designed for Zope 2's ExtensionClass,
              this may be None; in that case, function should be a
              class or type, and function.__basicnew__() is called
              to create the initial version of the object. This
              exception is deprecated.

Unpickling invokes function(*arguments) to create an initial object, called obj below. If the remaining items are left off, that's the end of unpickling for this object and obj is the result. Else obj is modified at unpickling time by each item specified, as follows.

  state       Optional.

              Additional state. If this is not None, the state is
              pickled, and obj.__setstate__(state) will be called
              when unpickling. If no __setstate__ method is defined,
              a default implementation is provided, which assumes
              that state is a dictionary mapping instance variable
              names to their values. The default implementation
              calls:

                  obj.__dict__.update(state)

              or, if the update() call fails:

                  for k, v in state.items():
                      setattr(obj, k, v)

  listitems   Optional, and new in this PEP.

              If this is not None, it should be an iterator (not a
              sequence!) yielding successive list items. These list
              items will be pickled, and appended to the object
              using either obj.append(item) or
              obj.extend(list_of_items). This is primarily used for
              list subclasses, but may be used by other classes as
              long as they have append() and extend() methods with
              the appropriate signature. (Whether append() or
              extend() is used depends on which pickle protocol
              version is used as well as the number of items to
              append, so both must be supported.)

  dictitems   Optional, and new in this PEP.

              If this is not None, it should be an iterator (not a
              sequence!) yielding successive dictionary items, which
              should be tuples of the form (key, value). These items
              will be pickled, and stored to the object using
              obj[key] = value. This is primarily used for dict
              subclasses, but may be used by other classes as long
              as they implement __setitem__.

Note: in Python 2.2 and before, when using cPickle, state would be pickled if present even if it is None; the only safe way to avoid the __setstate__ call was to return a two-tuple from __reduce__. (But pickle.py would not pickle state if it was None.) In Python 2.3, __setstate__ will never be called at unpickling time when __reduce__ returns a state with value None at pickling time.

A __reduce__ implementation that needs to work both under Python 2.2 and under Python 2.3 could check the variable pickle.format_version to determine whether to use the listitems and dictitems features. If this value is >= "2.0" then they are supported. If not, any list or dict items should be incorporated somehow in the 'state' return value, and the __setstate__ method should be prepared to accept list or dict items as part of the state (how this is done is up to the application).
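As an illustration (a sketch, not from the PEP; the Counter class is invented), a dict subclass could return the full five-item tuple, using an iterator for the items as required:

    class Counter(dict):
        """A dict subclass carrying one extra attribute."""
        def __init__(self, label=''):
            dict.__init__(self)
            self.label = label

        def __reduce__(self):
            # (function, arguments, state, listitems, dictitems)
            return (Counter,            # callable recreating the object
                    (self.label,),      # arguments for the callable
                    None,               # no extra state: no __setstate__ call
                    None,               # no list items
                    self.iteritems())   # an iterator, not a sequence!

When unpickled, Counter(label) is called first, and the dictionary items are then stored back one by one using obj[key] = value, exactly as described above.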
The __reduce_ex__ API

It is sometimes useful to know the protocol version when implementing __reduce__. This can be done by implementing a method named __reduce_ex__ instead of __reduce__. __reduce_ex__, when it exists, is called in preference over __reduce__ (you may still provide __reduce__ for backwards compatibility). The __reduce_ex__ method will be called with a single integer argument, the protocol version.

The 'object' class implements both __reduce__ and __reduce_ex__; however, if a subclass overrides __reduce__ but not __reduce_ex__, the __reduce_ex__ implementation detects this and calls __reduce__.

Customizing pickling absent a __reduce__ implementation

If no __reduce__ implementation is available for a particular class, there are three cases that need to be considered separately, because they are handled differently:

1. classic class instances, all protocols
2. new-style class instances, protocols 0 and 1
3. new-style class instances, protocol 2

Types implemented in C are considered new-style classes. However, except for the common built-in types, these need to provide a __reduce__ implementation in order to be picklable with protocols 0 or 1. Protocol 2 supports built-in types providing __getnewargs__, __getstate__ and __setstate__ as well.

Case 1: pickling classic class instances

This case is the same for all protocols, and is unchanged from Python 2.1.

For classic classes, __reduce__ is not used. Instead, classic classes can customize their pickling by providing methods named __getstate__, __setstate__ and __getinitargs__. Absent these, a default pickling strategy for classic class instances is implemented that works as long as all instance variables are picklable. This default strategy is documented in terms of default implementations of __getstate__ and __setstate__.

The primary way to customize pickling of classic class instances is by specifying __getstate__ and/or __setstate__ methods. It is fine if a class implements one of these but not the other, as long as it is compatible with the default version.

The __getstate__ method

The __getstate__ method should return a picklable value representing the object's state without referencing the object itself. If no __getstate__ method exists, a default implementation is used that returns self.__dict__.

The __setstate__ method

The __setstate__ method should take one argument; it will be called with the value returned by __getstate__ (or its default implementation).

If no __setstate__ method exists, a default implementation is provided that assumes the state is a dictionary mapping instance variable names to values. The default implementation tries two things:

- First, it tries to call self.__dict__.update(state).
- If the update() call fails with a RuntimeError exception, it calls setattr(self, key, value) for each (key, value) pair in the state dictionary. This only happens when unpickling in restricted execution mode (see the rexec standard library module).
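As a brief, invented sketch of this customization (not from the PEP): a classic class holding an open file can drop the unpicklable handle in __getstate__ and recreate it in __setstate__:

    class Logger:                       # classic class: no (object) base
        def __init__(self, path):
            self.path = path
            self.fp = open(path, 'a')

        def __getstate__(self):
            # Return a picklable state: everything but the file object.
            state = self.__dict__.copy()
            del state['fp']
            return state

        def __setstate__(self, state):
            self.__dict__.update(state)
            self.fp = open(self.path, 'a')   # recreate the handle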
The __getinitargs__ method

The __setstate__ method (or its default implementation) requires that a new object already exists so that its __setstate__ method can be called. The point is to create a new object that isn't fully initialized; in particular, the class's __init__ method should not be called if possible.

These are the possibilities:

- Normally, the following trick is used: create an instance of a trivial classic class (one without any methods or instance variables) and then use __class__ assignment to change its class to the desired class. This creates an instance of the desired class with an empty __dict__ whose __init__ has not been called.
- However, if the class has a method named __getinitargs__, the above trick is not used, and a class instance is created by using the tuple returned by __getinitargs__ as an argument list to the class constructor. This is done even if __getinitargs__ returns an empty tuple --- a __getinitargs__ method that returns () is not equivalent to not having __getinitargs__ at all. __getinitargs__ must return a tuple.
- In restricted execution mode, the trick from the first bullet doesn't work; in this case, the class constructor is called with an empty argument list if no __getinitargs__ method exists. This means that in order for a classic class instance to be unpickled in restricted execution mode, the class must either implement __getinitargs__ or its constructor (i.e., its __init__ method) must be callable without arguments.
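A small invented sketch (not from the PEP) of the __getinitargs__ hook, which makes unpickling go through __init__ with the returned tuple:

    class Connection:                   # classic class
        def __init__(self, host, port=80):
            self.host = host
            self.port = port

        def __getinitargs__(self):
            # Unpickling will call Connection(self.host, self.port)
            # instead of bypassing __init__.
            return (self.host, self.port)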
Case 2: pickling new-style class instances using protocols 0 or 1

This case is unchanged from Python 2.2. For better pickling of new-style class instances when backwards compatibility is not an issue, protocol 2 should be used; see case 3 below.

New-style classes, whether implemented in C or in Python, inherit a default __reduce__ implementation from the universal base class 'object'.

This default __reduce__ implementation is not used for those built-in types for which the pickle module has built-in support. Here's a full list of those types:

- Concrete built-in types: NoneType, bool, int, float, complex, str, unicode, tuple, list, dict. (Complex is supported by virtue of a __reduce__ implementation registered in copy_reg.) In Jython, PyStringMap is also included in this list.
- Classic instances.
- Classic class objects, Python function objects, built-in function and method objects, and new-style type objects (== new-style class objects). These are pickled by name, not by value: at unpickling time, a reference to an object with the same name (the fully qualified module name plus the variable name in that module) is substituted.

The default __reduce__ implementation will fail at pickling time for built-in types not mentioned above, and for new-style classes implemented in C: if they want to be picklable, they must supply a custom __reduce__ implementation under protocols 0 and 1.

For new-style classes implemented in Python, the default __reduce__ implementation (copy_reg._reduce) works as follows: Let D be the class of the object to be pickled. First, find the nearest base class that is implemented in C (either as a built-in type or as a type defined by an extension class); call this base class B. Unless B is the class 'object', instances of class B must be picklable, either by having built-in support (as defined in the above three bullet points), or by having a non-default __reduce__ implementation. B must not be the same class as D (if it were, it would mean that D is not implemented in Python).

The callable produced by the default __reduce__ is copy_reg._reconstructor, and its arguments tuple is (D, B, basestate), where basestate is None if B is the builtin object class, and basestate is:

    basestate = B(obj)

if B is not the builtin object class. This is geared toward pickling subclasses of builtin types, where, for example, list(some_list_subclass_instance) produces "the list part" of the list subclass instance. The object is recreated at unpickling time by copy_reg._reconstructor, like so:

    obj = B.__new__(D, basestate)
    B.__init__(obj, basestate)

Objects using the default __reduce__ implementation can customize it by defining __getstate__ and/or __setstate__ methods. These work almost the same as described for classic classes above, except that if __getstate__ returns an object (of any type) whose value is considered false (e.g. None, or a number that is zero, or an empty sequence or mapping), this state is not pickled and __setstate__ will not be called at all.

If __getstate__ exists and returns a true value, that value becomes the third element of the tuple returned by the default __reduce__, and at unpickling time the value is passed to __setstate__. If __getstate__ does not exist, but obj.__dict__ exists, then obj.__dict__ becomes the third element of the tuple returned by __reduce__, and again at unpickling time the value is passed to obj.__setstate__. The default __setstate__ is the same as that for classic classes, described above.

Note that this strategy ignores slots. Instances of new-style classes that have slots but no __getstate__ method cannot be pickled by protocols 0 and 1; the code explicitly checks for this condition.

Note that pickling new-style class instances ignores __getinitargs__ if it exists (and under all protocols). __getinitargs__ is useful only for classic classes.
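A small invented sketch (not from the PEP) of what the default __reduce__ produces for a Python subclass of a built-in type under protocols 0 and 1; the comments paraphrase the description above:

    class NamedList(list):      # nearest C base class B is list
        pass

    n = NamedList([1, 2, 3])
    n.label = 'demo'

    # The default __reduce__ (copy_reg._reduce) returns, roughly:
    #   (copy_reg._reconstructor, (NamedList, list, [1, 2, 3]), {'label': 'demo'})
    # basestate is list(n) == [1, 2, 3]; the state is n.__dict__.
    # Unpickling then recreates the object as:
    #   obj = list.__new__(NamedList, [1, 2, 3])
    #   list.__init__(obj, [1, 2, 3])
    # and updates obj.__dict__ with {'label': 'demo'}.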
Case 3: pickling new-style class instances using protocol 2

Under protocol 2, the default __reduce__ implementation inherited from the 'object' base class is ignored. Instead, a different default implementation is used, which allows more efficient pickling of new-style class instances than possible with protocols 0 or 1, at the cost of backward incompatibility with Python 2.2 (meaning no more than that a protocol 2 pickle cannot be unpickled before Python 2.3).

The customization uses three special methods: __getstate__, __setstate__ and __getnewargs__ (note that __getinitargs__ is again ignored). It is fine if a class implements one or more but not all of these, as long as it is compatible with the default implementations.

The __getstate__ method

The __getstate__ method should return a picklable value representing the object's state without referencing the object itself. If no __getstate__ method exists, a default implementation is used which is described below.

There's a subtle difference between classic and new-style classes here: if a classic class's __getstate__ returns None, self.__setstate__(None) will be called as part of unpickling. But if a new-style class's __getstate__ returns None, its __setstate__ won't be called at all as part of unpickling.

If no __getstate__ method exists, a default state is computed. There are several cases:

- For a new-style class that has no instance __dict__ and no __slots__, the default state is None.
- For a new-style class that has an instance __dict__ and no __slots__, the default state is self.__dict__.
- For a new-style class that has an instance __dict__ and __slots__, the default state is a tuple consisting of two dictionaries: self.__dict__, and a dictionary mapping slot names to slot values. Only slots that have a value are included in the latter.
- For a new-style class that has __slots__ and no instance __dict__, the default state is a tuple whose first item is None and whose second item is a dictionary mapping slot names to slot values described in the previous bullet.

The __setstate__ method

The __setstate__ method should take one argument; it will be called with the value returned by __getstate__ or with the default state described above if no __getstate__ method is defined.

If no __setstate__ method exists, a default implementation is provided that can handle the state returned by the default __getstate__, described above.

The __getnewargs__ method

Like for classic classes, the __setstate__ method (or its default implementation) requires that a new object already exists so that its __setstate__ method can be called.

In protocol 2, a new pickling opcode is used that causes a new object to be created as follows:

    obj = C.__new__(C, *args)

where C is the class of the pickled object, and args is either the empty tuple, or the tuple returned by the __getnewargs__ method, if defined. __getnewargs__ must return a tuple. The absence of a __getnewargs__ method is equivalent to the existence of one that returns ().

The __newobj__ unpickling function

When the unpickling function returned by __reduce__ (the first item of the returned tuple) has the name __newobj__, something special happens for pickle protocol 2. An unpickling function named __newobj__ is assumed to have the following semantics:

    def __newobj__(cls, *args):
        return cls.__new__(cls, *args)

Pickle protocol 2 special-cases an unpickling function with this name, and emits a pickling opcode that, given 'cls' and 'args', will return cls.__new__(cls, *args) without also pickling a reference to __newobj__ (this is the same pickling opcode used by protocol 2 for a new-style class instance when no __reduce__ implementation exists). This is the main reason why protocol 2 pickles are much smaller than classic pickles. Of course, the pickling code cannot verify that a function named __newobj__ actually has the expected semantics. If you use an unpickling function named __newobj__ that returns something different, you deserve what you get.

It is safe to use this feature under Python 2.2; there's nothing in the recommended implementation of __newobj__ that depends on Python 2.3.
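To tie __getnewargs__ and the new-object opcode together, here is an invented sketch (not from the PEP) of a class whose __new__ takes a mandatory argument; with __getnewargs__ defined, protocol 2 can recreate the instance without calling __init__:

    class Tag(object):
        """Immutable-style: the value is fixed in __new__."""
        def __new__(cls, name):
            self = object.__new__(cls)
            self.name = name
            return self

        def __getnewargs__(self):
            # Protocol 2 recreates the object via
            # Tag.__new__(Tag, self.name), without calling __init__.
            return (self.name,)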
The extension registry

Protocol 2 supports a new mechanism to reduce the size of pickles.

When class instances (classic or new-style) are pickled, the full name of the class (module name including package name, and class name) is included in the pickle. Especially for applications that generate many small pickles, this is a lot of overhead that has to be repeated in each pickle. For large pickles, when using protocol 1, repeated references to the same class name are compressed using the "memo" feature; but each class name must be spelled in full at least once per pickle, and this causes a lot of overhead for small pickles.

The extension registry allows one to represent the most frequently used names by small integers, which are pickled very efficiently: an extension code in the range 1--255 requires only two bytes including the opcode, one in the range 256--65535 requires only three bytes including the opcode.

One of the design goals of the pickle protocol is to make pickles "context-free": as long as you have installed the modules containing the classes referenced by a pickle, you can unpickle it, without needing to import any of those classes ahead of time.

Unbridled use of extension codes could jeopardize this desirable property of pickles. Therefore, the main use of extension codes is reserved for a set of codes to be standardized by some standard-setting body. This being Python, the standard-setting body is the PSF. From time to time, the PSF will decide on a table mapping extension codes to class names (or occasionally names of other global objects; functions are also eligible). This table will be incorporated in the next Python release(s).

However, for some applications, like Zope, context-free pickles are not a requirement, and waiting for the PSF to standardize some codes may not be practical. Two solutions are offered for such applications.

First, a few ranges of extension codes are reserved for private use. Any application can register codes in these ranges. Two applications exchanging pickles using codes in these ranges need to have some out-of-band mechanism to agree on the mapping between extension codes and names.

Second, some large Python projects (e.g. Zope) can be assigned a range of extension codes outside the "private use" range that they can assign as they see fit.

The extension registry is defined as a mapping between extension codes and names. When an extension code is unpickled, it ends up producing an object, but this object is gotten by interpreting the name as a module name followed by a class (or function) name. The mapping from names to objects is cached. It is quite possible that certain names cannot be imported; that should not be a problem as long as no pickle containing a reference to such names has to be unpickled. (The same issue already exists for direct references to such names in pickles that use protocols 0 or 1.)
Here is the proposed initial assignment of extension code ranges:

    +-------+-------+-------+---------------------------------------------------+
    | First | Last  | Count | Purpose                                           |
    +=======+=======+=======+===================================================+
    |     0 |     0 |     1 | Reserved --- will never be used                   |
    +-------+-------+-------+---------------------------------------------------+
    |     1 |   127 |   127 | Reserved for Python standard library              |
    +-------+-------+-------+---------------------------------------------------+
    |   128 |   191 |    64 | Reserved for Zope                                 |
    +-------+-------+-------+---------------------------------------------------+
    |   192 |   239 |    48 | Reserved for 3rd parties                          |
    +-------+-------+-------+---------------------------------------------------+
    |   240 |   255 |    16 | Reserved for private use (will never be assigned) |
    +-------+-------+-------+---------------------------------------------------+
    |   256 |   MAX |   MAX | Reserved for future assignment                    |
    +-------+-------+-------+---------------------------------------------------+

MAX stands for 2147483647, or 2**31-1. This is a hard limitation of the protocol as currently defined.

At the moment, no specific extension codes have been assigned yet.

Extension registry API

The extension registry is maintained as private global variables in the copy_reg module. The following three functions are defined in this module to manipulate the registry:

add_extension(module, name, code)
    Register an extension code. The module and name arguments must be strings; code must be an int in the inclusive range 1 through MAX. This must either register a new (module, name) pair to a new code, or be a redundant repeat of a previous call that was not canceled by a remove_extension() call; a (module, name) pair may not be mapped to more than one code, nor may a code be mapped to more than one (module, name) pair.

remove_extension(module, name, code)
    Arguments are as for add_extension(). Remove a previously registered mapping between (module, name) and code.

clear_extension_cache()
    The implementation of extension codes may use a cache to speed up loading objects that are named frequently. This cache can be emptied (removing references to cached objects) by calling this method.

Note that the API does not enforce the standard range assignments. It is up to applications to respect these.
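For illustration, a minimal sketch of registering a private-use extension code for a hypothetical class myapp.Thing (the module name, class name, and code are invented for this example):

    import copy_reg  # renamed to copyreg in Python 3

    # 240 falls in the 240--255 private-use range from the table above.
    # Both the pickling and unpickling sides must perform the same
    # registration, since private-use codes are agreed out-of-band.
    copy_reg.add_extension('myapp', 'Thing', 240)

With this in place, protocol 2 pickles of myapp.Thing instances encode the class as the two-byte extension code rather than the full dotted name.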
The copy module

Traditionally, the copy module has supported an extended subset of the pickling APIs for customizing the copy() and deepcopy() operations.

In particular, besides checking for a __copy__ or __deepcopy__ method, copy() and deepcopy() have always looked for __reduce__, and for classic classes, have looked for __getinitargs__, __getstate__ and __setstate__.

In Python 2.2, the default __reduce__ inherited from 'object' made copying simple new-style classes possible, but slots and various other special cases were not covered.

In Python 2.3, several changes are made to the copy module:

- __reduce_ex__ is supported (and always called with 2 as the protocol version argument).
- The four- and five-argument return values of __reduce__ are supported.
- Before looking for a __reduce__ method, the copy_reg.dispatch_table is consulted, just like for pickling.
- When the __reduce__ method is inherited from object, it is (unconditionally) replaced by a better one that uses the same APIs as pickle protocol 2: __getnewargs__, __getstate__, and __setstate__, handling list and dict subclasses, and handling slots.

As a consequence of the latter change, certain new-style classes that were copyable under Python 2.2 are not copyable under Python 2.3. (These classes are also not picklable using pickle protocol 2.) A minimal example of such a class:

    class C(object):
        def __new__(cls, a):
            return object.__new__(cls)

The problem only occurs when __new__ is overridden and has at least one mandatory argument in addition to the class argument. To fix this, a __getnewargs__ method should be added that returns the appropriate argument tuple (excluding the class).

Pickling Python longs

Pickling and unpickling Python longs takes time quadratic in the number of digits, in protocols 0 and 1. Under protocol 2, new opcodes support linear-time pickling and unpickling of longs.

Pickling bools

Protocol 2 introduces new opcodes for pickling True and False directly. Under protocols 0 and 1, bools are pickled as integers, using a trick in the representation of the integer in the pickle so that an unpickler can recognize that a bool was intended. That trick consumed 4 bytes per bool pickled. The new bool opcodes consume 1 byte per bool.

Pickling small tuples

Protocol 2 introduces new opcodes for more-compact pickling of tuples of lengths 1, 2 and 3. Protocol 1 previously introduced an opcode for more-compact pickling of empty tuples.

Protocol identification

Protocol 2 introduces a new opcode, with which all protocol 2 pickles begin, identifying that the pickle is protocol 2. Attempting to unpickle a protocol 2 pickle under older versions of Python will therefore raise an "unknown opcode" exception immediately.

Pickling of large lists and dicts

Protocol 1 pickles large lists and dicts "in one piece", which minimizes pickle size, but requires that unpickling create a temp object as large as the object being unpickled. Part of the protocol 2 changes break large lists and dicts into pieces of no more than 1000 elements each, so that unpickling needn't create a temp object larger than needed to hold 1000 elements. This isn't part of protocol 2, however: the opcodes produced are still part of protocol 1. __reduce__ implementations that return the optional new listitems or dictitems iterators also benefit from this unpickling temp-space optimization.

Copyright

This document has been placed in the public domain.
PEP: 726
Title: Module __setattr__ and __delattr__
Author: Sergey B Kirpichev <[email protected]>
Sponsor: Adam Turner <[email protected]>
Discussions-To: https://discuss.python.org/t/32640/
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 24-Aug-2023
Python-Version: 3.13
Post-History: 06-Apr-2023, 31-Aug-2023
Resolution: https://discuss.python.org/t/32640/32

Abstract

This PEP proposes supporting user-defined __setattr__ and __delattr__ methods on modules to extend customization of module attribute access beyond PEP 562.

Motivation

There are several potential uses of a module __setattr__:

1. To prevent setting an attribute at all (i.e. make it read-only)
2. To validate the value to be assigned
3. To intercept setting an attribute and update some other state

Proper support for read-only attributes would also require adding the __delattr__ function to prevent their deletion.

It would be convenient to directly support such customization, by recognizing __setattr__ and __delattr__ methods defined in a module that would act like normal object.__setattr__ and object.__delattr__ methods, except that they will be defined on module instances. Together with the existing __getattr__ and __dir__ methods this will streamline all variants of customizing module attribute access. For example:

    # mplib.py

    CONSTANT = 3.14
    prec = 53
    dps = 15

    def dps_to_prec(n):
        """Return the number of bits required to represent n decimals accurately."""
        return max(1, int(round((int(n)+1)*3.3219280948873626)))

    def prec_to_dps(n):
        """Return the number of accurate decimals that can be represented with n bits."""
        return max(1, int(round(int(n)/3.3219280948873626)-1))

    def validate(n):
        n = int(n)
        if n <= 0:
            raise ValueError('Positive integer expected')
        return n

    def __setattr__(name, value):
        if name == 'CONSTANT':
            raise AttributeError('Read-only attribute!')
        if name == 'dps':
            value = validate(value)
            globals()['dps'] = value
            globals()['prec'] = dps_to_prec(value)
            return
        if name == 'prec':
            value = validate(value)
            globals()['prec'] = value
            globals()['dps'] = prec_to_dps(value)
            return
        globals()[name] = value

    def __delattr__(name):
        if name in ('CONSTANT', 'dps', 'prec'):
            raise AttributeError('Read-only attribute!')
        del globals()[name]

    >>> import mplib
    >>> mplib.foo = 'spam'
    >>> mplib.CONSTANT = 42
    Traceback (most recent call last):
      ...
    AttributeError: Read-only attribute!
    >>> del mplib.foo
    >>> del mplib.CONSTANT
    Traceback (most recent call last):
      ...
    AttributeError: Read-only attribute!
    >>> mplib.prec
    53
    >>> mplib.dps
    15
    >>> mplib.dps = 5
    >>> mplib.prec
    20
    >>> mplib.dps = 0
    Traceback (most recent call last):
      ...
    ValueError: Positive integer expected

Existing Options

The current workaround is assigning the __class__ of a module object to a custom subclass of types.ModuleType (see [1]). For example, to prevent modification or deletion of an attribute we could use:

    # mod.py

    import sys
    from types import ModuleType

    CONSTANT = 3.14

    class ImmutableModule(ModuleType):
        def __setattr__(self, name, value):
            raise AttributeError('Read-only attribute!')

        def __delattr__(self, name):
            raise AttributeError('Read-only attribute!')

    sys.modules[__name__].__class__ = ImmutableModule

But this variant is slower (~2x) than the proposed solution. More importantly, it also brings a noticeable speed regression (~2-3x) for attribute access.

Specification

The __setattr__ function at the module level should accept two arguments, the name of an attribute and the value to be assigned, and return None or raise an AttributeError:
    def __setattr__(name: str, value: typing.Any, /) -> None: ...

The __delattr__ function should accept one argument, the name of an attribute, and return None or raise an AttributeError:

    def __delattr__(name: str, /) -> None: ...

The __setattr__ and __delattr__ functions are looked up in the module __dict__. If present, the appropriate function is called to customize setting the attribute or its deletion, else the normal mechanism (storing/deleting the value in the module dictionary) will work.

Defining module __setattr__ or __delattr__ only affects lookups made using the attribute access syntax --- directly accessing the module globals (whether by globals() within the module, or via a reference to the module's globals dictionary) is unaffected. For example:

    >>> import mod
    >>> mod.__dict__['foo'] = 'spam'  # bypasses __setattr__, defined in mod.py

or:

    # mod.py

    def __setattr__(name, value):
        ...

    foo = 'spam'  # bypasses __setattr__
    globals()['bar'] = 'spam'  # here too

    def f():
        global x
        x = 123

    f()  # and here

To use a module global and trigger __setattr__ (or __delattr__), one can access it via sys.modules[__name__] within the module's code:

    # mod.py

    sys.modules[__name__].foo = 'spam'  # __setattr__ not defined yet, plain assignment

    def __setattr__(name, value):
        ...

    sys.modules[__name__].bar = 'spam'  # triggers __setattr__

This limitation is intentional (just as for PEP 562), because the interpreter highly optimizes access to module globals; disabling all that and going through special methods written in Python would slow down the code unacceptably.

How to Teach This

The "Customizing module attribute access"[2] section of the documentation will be expanded to include the new functions.

Reference Implementation

The reference implementation for this PEP can be found in CPython PR #108261.

Backwards compatibility

This PEP may break code that uses module level (global) names __setattr__ and __delattr__, but the language reference explicitly reserves all undocumented dunder names, and allows "breakage without warning"[3].

The performance implications of this PEP are small, since the additional dictionary lookup is much cheaper than storing/deleting the value in the dictionary. It is also hard to imagine a module that expects the user to set (and/or delete) attributes often enough for this to be a performance concern. On the other hand, the proposed mechanism allows overriding the setting/deletion of attributes without affecting the speed of attribute access, which is the far more likely place for a performance penalty to matter.

Discussion

As pointed out by Victor Stinner, the proposed API could be useful already in the stdlib, for example to ensure that the type of sys.modules is always a dict:

    >>> import sys
    >>> sys.modules = 123
    >>> import asyncio
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "<frozen importlib._bootstrap>", line 1260, in _find_and_load
    AttributeError: 'int' object has no attribute 'get'

or to prevent deletion of critical sys attributes, whose possible absence makes other code more complicated. For example, code using sys.stderr has to check if the attribute exists and if it's not None. Currently, it's possible to remove any sys attribute, including functions:

    >>> import sys
    >>> del sys.excepthook
    >>> 1+  # note: sys.excepthook is now missing
      File "<stdin>", line 1
        1+
         ^
    SyntaxError: invalid syntax

See the related issue for other details. Other stdlib modules also come with attributes which can be overridden (as a feature) and some input validation here could be helpful.
Examples: threading.excepthook, warnings.showwarning, io.DEFAULT_BUFFER_SIZE or os.SEEK_SET.

Also, a typical use case for customizing module attribute access is managing deprecation warnings. But PEP 562 accomplishes this scenario only partially: e.g. it's impossible to issue a warning during an attempt to change a renamed attribute.
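For illustration, a sketch of that deprecation use case under the proposed mechanism (old_name and new_name are invented for this example; since the PEP was rejected, this does not run on any released Python):

    # mod.py -- sketch only; assumes module-level __setattr__ is supported
    import warnings

    new_name = 42  # current home of the value formerly called old_name

    def __setattr__(name, value):
        if name == 'old_name':
            warnings.warn('old_name is deprecated; use new_name instead',
                          DeprecationWarning, stacklevel=2)
            name = 'new_name'
        globals()[name] = value

A module __getattr__ under PEP 562 can only warn on reads of old_name; the write path shown here is what this PEP would have added.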
Footnotes

[1] Customizing module attribute access (https://docs.python.org/3.11/reference/datamodel.html#customizing-module-attribute-access)

[2] Customizing module attribute access (https://docs.python.org/3.11/reference/datamodel.html#customizing-module-attribute-access)

[3] Reserved classes of identifiers (https://docs.python.org/3.11/reference/lexical_analysis.html#reserved-classes-of-identifiers)

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 3105
Title: Make print a function
Author: Georg Brandl <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 19-Nov-2006
Python-Version: 3.0
Post-History:

Abstract

The title says it all -- this PEP proposes a new print() builtin that replaces the print statement and suggests a specific signature for the new function.

Rationale

The print statement has long appeared on lists of dubious language features that are to be removed in Python 3000, such as Guido's "Python Regrets" presentation[1]. As such, the objective of this PEP is not new, though it might become much disputed among Python developers.

The following arguments for a print() function are distilled from a python-3000 message by Guido himself[2]:

- print is the only application-level functionality that has a statement dedicated to it. Within Python's world, syntax is generally used as a last resort, when something can't be done without help from the compiler. Print doesn't qualify for such an exception.
- At some point in application development one quite often feels the need to replace print output by something more sophisticated, like logging calls or calls into some other I/O library. With a print() function, this is a straightforward string replacement; today it is a mess adding all those parentheses and possibly converting >>stream style syntax.
- Having special syntax for print puts up a much larger barrier for evolution, e.g. a hypothetical new printf() function is not too far fetched when it will coexist with a print() function.
- There's no easy way to convert print statements into another call if one needs a different separator, not spaces, or none at all. Also, there's no easy way at all to conveniently print objects with some other separator than a space.
- If print() is a function, it would be much easier to replace it within one module (just def print(*args):...) or even throughout a program (e.g. by putting a different function in __builtin__.print). As it is, one can do this by writing a class with a write() method and assigning that to sys.stdout -- that's not bad, but definitely a much larger conceptual leap, and it works at a different level than print.

Specification

The signature for print(), taken from various mailings and recently posted on the python-3000 list[3] is:

    def print(*args, sep=' ', end='\n', file=None)

A call like:

    print(a, b, c, file=sys.stderr)

will be equivalent to today's:

    print >>sys.stderr, a, b, c

while the optional sep and end arguments specify what is printed between and after the arguments, respectively.

The softspace feature (a semi-secret attribute on files currently used to tell print whether to insert a space before the first item) will be removed. Therefore, there will not be a direct translation for today's:

    print "a",
    print

which will not print a space between the "a" and the newline.
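To make the proposed semantics concrete, here is a minimal sketch in Python 3 syntax (print_ is a stand-in name used only to avoid the 2.x keyword; this is not the actual built-in implementation):

    import sys

    def print_(*args, sep=' ', end='\n', file=None):
        # Write the string forms of args joined by sep, followed by end.
        if file is None:
            file = sys.stdout
        file.write(sep.join(map(str, args)) + end)

    print_('spam', 'eggs', sep=', ', file=sys.stderr)  # spam, eggs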
Backwards Compatibility

The changes proposed in this PEP will render most of today's print statements invalid. Only those which incidentally feature parentheses around all of their arguments will continue to be valid Python syntax in version 3.0, and of those, only the ones printing a single parenthesized value will continue to do the same thing. For example, in 2.x:

    >>> print ("Hello")
    Hello
    >>> print ("Hello", "world")
    ('Hello', 'world')

whereas in 3.0:

    >>> print ("Hello")
    Hello
    >>> print ("Hello", "world")
    Hello world

Luckily, as it is a statement in Python 2, print can be detected and replaced reliably and non-ambiguously by an automated tool, so there should be no major porting problems (provided someone writes the mentioned tool).

Implementation

The proposed changes were implemented in the Python 3000 branch in the Subversion revisions 53685 to 53704. Most of the legacy code in the library has been converted too, but it is an ongoing effort to catch every print statement that may be left in the distribution.

References

Copyright

This document has been placed in the public domain.

[1] http://legacy.python.org/doc/essays/ppt/regrets/PythonRegrets.pdf

[2] Replacement for print in Python 3.0 (Guido van Rossum) https://mail.python.org/pipermail/python-dev/2005-September/056154.html

[3] print() parameters in py3k (Guido van Rossum) https://mail.python.org/pipermail/python-3000/2006-November/004485.html
PEP: 491
Title: The Wheel Binary Package Format 1.9
Author: Daniel Holth <[email protected]>
Discussions-To: [email protected]
Status: Deferred
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 16-Apr-2015

Abstract

This PEP describes the second version of a built-package format for Python called "wheel". Wheel provides a Python-specific, relocatable package format that allows people to install software more quickly and predictably than re-building from source each time.

A wheel is a ZIP-format archive with a specially formatted file name and the .whl extension. It contains a single distribution nearly as it would be installed according to PEP 376 with a particular installation scheme. Simple wheels can be unpacked onto sys.path and used directly but wheels are usually installed with a specialized installer.

This version of the wheel specification adds support for installing distributions into many different directories, and adds a way to find those files after they have been installed.

PEP Deferral

This PEP is not currently being actively pursued, with Python packaging improvements currently focusing on the package build process rather than expanding the binary archive format to cover additional use cases.

Some specific elements to be addressed when work on this PEP is resumed in the future:

- migrating the official wheel format definition to https://packaging.python.org/specifications/ (similar to what PEP 566 did for https://packaging.python.org/specifications/core-metadata/)
- updating the PEP itself to focus on the changes being made between the two versions of the format and the rationale for those changes, rather than having to repeat all the information that is unchanged from PEP 427
- clarifying that the PEP is deliberately written to allow existing installers to be compliant with the specification when using existing install scheme definitions, while also allowing the creation of new install scheme definitions that take advantage of the richer categorisation scheme for the contents of the binary archive

Rationale

Wheel 1.0 is best at installing files into site-packages and a few other locations specified by distutils, but users would like to install files from a single distribution into many directories -- perhaps separate locations for docs, data, and code. Unfortunately not everyone agrees on where these install locations should be relative to the root directory. This version of the format adds many more categories, each of which can be installed to a different destination based on policy. Since it might also be important to locate the installed files at runtime, this version of the format also adds a way to record the installed paths in a way that can be read by the installed software.

Details

Installing a wheel 'distribution-1.0-py32-none-any.whl'

Wheel installation notionally consists of two phases:

- Unpack.

  a. Parse distribution-1.0.dist-info/WHEEL.
  b. Check that installer is compatible with Wheel-Version. Warn if minor version is greater, abort if major version is greater.
  c. If Root-Is-Purelib == 'true', unpack archive into purelib (site-packages).
  d. Else unpack archive into platlib (site-packages).

- Spread.

  a. Unpacked archive includes distribution-1.0.dist-info/ and (if there is data) distribution-1.0.data/.
  b. Move each subtree of distribution-1.0.data/ onto its destination path. Each subdirectory of distribution-1.0.data/ is a key into a dict of destination directories, such as distribution-1.0.data/(purelib|platlib|headers|scripts|data).
  c. Update scripts starting with #!python to point to the correct interpreter. (Note: Python scripts are usually handled by package metadata, and not included verbatim in wheel.)
  d. Update distribution-1.0.dist-info/RECORD with the installed paths.
  e. If empty, remove the distribution-1.0.data directory.
  f. Compile any installed .py to .pyc. (Uninstallers should be smart enough to remove .pyc even if it is not mentioned in RECORD.)

In practice, installers will usually extract files directly from the archive to their destinations without writing a temporary distribution-1.0.data/ directory.

Recommended installer features

Rewrite #!python.
    In wheel, verbatim scripts are packaged in {distribution}-{version}.data/scripts/. If the first line of a file in scripts/ starts with exactly b'#!python', rewrite to point to the correct interpreter. Unix installers may need to add the +x bit to these files if the archive was created on Windows. The b'#!pythonw' convention is allowed; b'#!pythonw' indicates a GUI script instead of a console script.

Generate script wrappers.
    Python scripts are more commonly represented as a module:callable string in package metadata, and are not included verbatim in the wheel archive's scripts directory. This kind of script gives the installer an opportunity to generate platform specific wrappers.

Recommended archiver features

Place .dist-info at the end of the archive.
    Archivers are encouraged to place the .dist-info files physically at the end of the archive. This enables some potentially interesting ZIP tricks including the ability to amend the metadata without rewriting the entire archive.

File Format

File name convention

The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl.

distribution
    Distribution name, e.g. 'django', 'pyramid'.

version
    Distribution version, e.g. 1.0.

build tag
    Optional build number. Must start with a digit. A tie breaker if two wheels have the same version. Sort as the empty string if unspecified, else sort the initial digits as a number, and the remainder lexicographically.

language implementation and version tag
    E.g. 'py27', 'py2', 'py3'.

abi tag
    E.g. 'cp33m', 'abi3', 'none'.

platform tag
    E.g. 'linux_x86_64', 'any'.

For example, distribution-1.0-1-py27-none-any.whl is the first build of a package called 'distribution', and is compatible with Python 2.7 (any Python 2.7 implementation), with no ABI (pure Python), on any CPU architecture.

The last three components of the filename before the extension are called "compatibility tags." The compatibility tags express the package's basic interpreter requirements and are detailed in PEP 425.
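The naming convention above is mechanical enough to parse directly; here is a hedged sketch (the helper name and regular expression are this example's own, not part of the specification):

    import re

    WHEEL_RE = re.compile(
        r"^(?P<distribution>[^-]+)-(?P<version>[^-]+)"
        r"(-(?P<build>\d[^-]*))?"
        r"-(?P<python>[^-]+)-(?P<abi>[^-]+)-(?P<platform>[^-]+)\.whl$"
    )

    def parse_wheel_filename(filename):
        # Split a wheel filename into the fields described above.
        match = WHEEL_RE.match(filename)
        if match is None:
            raise ValueError('not a valid wheel filename: %r' % filename)
        return match.groupdict()

    # parse_wheel_filename('distribution-1.0-1-py27-none-any.whl')
    # -> {'distribution': 'distribution', 'version': '1.0', 'build': '1',
    #     'python': 'py27', 'abi': 'none', 'platform': 'any'}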
Escaping and Unicode

Each component of the filename is escaped by replacing runs of non-alphanumeric characters with an underscore _:

    re.sub(r"[^\w\d.]+", "_", distribution, flags=re.UNICODE)

The archive filename is Unicode. The packaging tools may only support ASCII package names, but Unicode filenames are supported in this specification.

The filenames inside the archive are encoded as UTF-8. Although some ZIP clients in common use do not properly display UTF-8 filenames, the encoding is supported by both the ZIP specification and Python's zipfile.

File contents

The contents of a wheel file, where {distribution} is replaced with the name of the package, e.g. beaglevote, and {version} is replaced with its version, e.g. 1.0.0, consist of:

1. /, the root of the archive, contains all files to be installed in purelib or platlib as specified in WHEEL. purelib and platlib are usually both site-packages.
2. {distribution}-{version}.dist-info/ contains metadata.
3. {distribution}-{version}.data/ contains one subdirectory for each non-empty install scheme key not already covered, where the subdirectory name is an index into a dictionary of install paths (e.g. data, scripts, include, purelib, platlib).
4. Python scripts must appear in scripts and begin with exactly b'#!python' in order to enjoy script wrapper generation and #!python rewriting at install time. They may have any or no extension.
5. {distribution}-{version}.dist-info/METADATA is Metadata version 1.1 or greater format metadata.
6. {distribution}-{version}.dist-info/WHEEL is metadata about the archive itself in the same basic key: value format:

       Wheel-Version: 1.9
       Generator: bdist_wheel 1.9
       Root-Is-Purelib: true
       Tag: py2-none-any
       Tag: py3-none-any
       Build: 1
       Install-Paths-To: wheel/_paths.py
       Install-Paths-To: wheel/_paths.json

7. Wheel-Version is the version number of the Wheel specification.
8. Generator is the name and optionally the version of the software that produced the archive.
9. Root-Is-Purelib is true if the top level directory of the archive should be installed into purelib; otherwise the root should be installed into platlib.
10. Tag is the wheel's expanded compatibility tags; in the example the filename would contain py2.py3-none-any.
11. Build is the build number and is omitted if there is no build number.
12. Install-Paths-To is a location relative to the archive that will be overwritten with the install-time paths of each category in the install scheme. See the install paths section. May appear 0 or more times.
13. A wheel installer should warn if Wheel-Version is greater than the version it supports, and must fail if Wheel-Version has a greater major version than the version it supports.
14. Wheel, being an installation format that is intended to work across multiple versions of Python, does not generally include .pyc files.
15. Wheel does not contain setup.py or setup.cfg.

The .dist-info directory

1. Wheel .dist-info directories include at a minimum METADATA, WHEEL, and RECORD.
2. METADATA is the package metadata, the same format as PKG-INFO as found at the root of sdists.
3. WHEEL is the wheel metadata specific to a build of the package.
4. RECORD is a list of (almost) all the files in the wheel and their secure hashes. Unlike PEP 376, every file except RECORD, which cannot contain a hash of itself, must include its hash. The hash algorithm must be sha256 or better; specifically, md5 and sha1 are not permitted, as signed wheel files rely on the strong hashes in RECORD to validate the integrity of the archive.
5. PEP 376's INSTALLER and REQUESTED are not included in the archive.
6. RECORD.jws is used for digital signatures. It is not mentioned in RECORD.
7. RECORD.p7s is allowed as a courtesy to anyone who would prefer to use S/MIME signatures to secure their wheel files. It is not mentioned in RECORD.
8. During extraction, wheel installers verify all the hashes in RECORD against the file contents. Apart from RECORD and its signatures, installation will fail if any file in the archive is not both mentioned and correctly hashed in RECORD.
The .data directory

Any file that is not normally installed inside site-packages goes into the .data directory, named as the .dist-info directory but with the .data/ extension:

    distribution-1.0.dist-info/
    distribution-1.0.data/

The .data directory contains subdirectories with the scripts, headers, documentation and so forth from the distribution. During installation the contents of these subdirectories are moved onto their destination paths.

If a subdirectory is not found in the install scheme, the installer should emit a warning, and it should be installed at distribution-1.0.data/... as if the package was unpacked by a standard unzip tool.

Install paths

In addition to the distutils install paths, wheel now includes the listed categories based on GNU autotools. This expanded scheme should help installers to implement system policy, but installers may root each category at any location.

A UNIX install scheme might map the categories to their installation paths like this:

    {
        'bindir': '$eprefix/bin',
        'sbindir': '$eprefix/sbin',
        'libexecdir': '$eprefix/libexec',
        'sysconfdir': '$prefix/etc',
        'sharedstatedir': '$prefix/com',
        'localstatedir': '$prefix/var',
        'libdir': '$eprefix/lib',
        'static_libdir': r'$prefix/lib',
        'includedir': '$prefix/include',
        'datarootdir': '$prefix/share',
        'datadir': '$datarootdir',
        'mandir': '$datarootdir/man',
        'infodir': '$datarootdir/info',
        'localedir': '$datarootdir/locale',
        'docdir': '$datarootdir/doc/$dist_name',
        'htmldir': '$docdir',
        'dvidir': '$docdir',
        'psdir': '$docdir',
        'pdfdir': '$docdir',
        'pkgdatadir': '$datadir/$dist_name'
    }

If a package needs to find its files at runtime, it can request they be written to a specified file or files by the installer and included in those same files inside the archive itself, relative to their location within the archive (so a wheel is still installed correctly if unpacked with a standard unzip tool, or perhaps not unpacked at all).

If the WHEEL metadata contains these fields:

    Install-Paths-To: wheel/_paths.py
    Install-Paths-To: wheel/_paths.json

Then the wheel installer, when it is about to unpack wheel/_paths.py from the archive, replaces it with the actual paths used at install time. The paths may be absolute or relative to the generated file.

If the filename ends with .py then a Python script is written. The script MUST be executed to get the paths, but it will probably look like this:

    data='../wheel-0.26.0.dev1.data/data'
    headers='../wheel-0.26.0.dev1.data/headers'
    platlib='../wheel-0.26.0.dev1.data/platlib'
    purelib='../wheel-0.26.0.dev1.data/purelib'
    scripts='../wheel-0.26.0.dev1.data/scripts'
    # ...

If the filename ends with .json then a JSON document is written:

    { "data": "../wheel-0.26.0.dev1.data/data", ... }

Only the categories actually used by a particular wheel must be written to this file.

These files are designed to be written to a location that can be found by the installed package without introducing any dependency on a packaging library.

Signed wheel files

Wheel files include an extended RECORD that enables digital signatures. PEP 376's RECORD is altered to include a secure hash digestname=urlsafe_b64encode_nopad(digest) (urlsafe base64 encoding with no trailing = characters) as the second column instead of an md5sum. All possible entries are hashed, including any generated files such as .pyc files, but not RECORD which cannot contain its own hash. For example:

    file.py,sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT_pNh2yI,3144
    distribution-1.0.dist-info/RECORD,,
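For illustration, a hedged sketch of computing such a RECORD digest (the helper name is invented; it reuses the urlsafe-base64-nopad encoding from the Appendix):

    import base64
    import hashlib

    def record_digest(path):
        # "sha256=" followed by the urlsafe-base64-nopad SHA-256 of the file.
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        encoded = base64.urlsafe_b64encode(digest).rstrip(b'=')
        return 'sha256=' + encoded.decode('ascii')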
The signature file(s) RECORD.jws and RECORD.p7s are not mentioned in RECORD at all since they can only be added after RECORD is generated. Every other file in the archive must have a correct hash in RECORD or the installation will fail.

If JSON web signatures are used, one or more JSON Web Signature JSON Serialization (JWS-JS) signatures is stored in a file RECORD.jws adjacent to RECORD. JWS is used to sign RECORD by including the SHA-256 hash of RECORD as the signature's JSON payload:

    { "hash": "sha256=ADD-r2urObZHcxBW3Cr-vDCu5RJwT4CaRTHiFmbcIYY" }

(The hash value is the same format used in RECORD.)

If RECORD.p7s is used, it must contain a detached S/MIME format signature of RECORD.

A wheel installer is not required to understand digital signatures but MUST verify the hashes in RECORD against the extracted file contents. When the installer checks file hashes against RECORD, a separate signature checker only needs to establish that RECORD matches the signature.

See:

- RFC 7515
- https://datatracker.ietf.org/doc/html/draft-jones-jose-jws-json-serialization.html
- RFC 7517
- https://datatracker.ietf.org/doc/html/draft-jones-jose-json-private-key.html

Comparison to .egg

1. Wheel is an installation format; egg is importable. Wheel archives do not need to include .pyc and are less tied to a specific Python version or implementation. Wheel can install (pure Python) packages built with previous versions of Python so you don't always have to wait for the packager to catch up.
2. Wheel uses .dist-info directories; egg uses .egg-info. Wheel is compatible with the new world of Python packaging and the new concepts it brings.
3. Wheel has a richer file naming convention for today's multi-implementation world. A single wheel archive can indicate its compatibility with a number of Python language versions and implementations, ABIs, and system architectures. Historically the ABI has been specific to a CPython release; wheel is ready for the stable ABI.
4. Wheel is lossless. The first wheel implementation bdist_wheel always generates egg-info, and then converts it to a .whl. It is also possible to convert existing eggs and bdist_wininst distributions.
5. Wheel is versioned. Every wheel file contains the version of the wheel specification and the implementation that packaged it. Hopefully the next migration can simply be to Wheel 2.0.
6. Wheel is a reference to the other Python.

FAQ

Wheel defines a .data directory. Should I put all my data there?

This specification does not have an opinion on how you should organize your code. The .data directory is just a place for any files that are not normally installed inside site-packages or on the PYTHONPATH. In other words, you may continue to use pkgutil.get_data(package, resource) even though those files will usually not be distributed in wheel's .data directory.

Why does wheel include attached signatures?

Attached signatures are more convenient than detached signatures because they travel with the archive. Since only the individual files are signed, the archive can be recompressed without invalidating the signature, or individual files can be verified without having to download the whole archive.

Why does wheel allow JWS signatures?

The JOSE specifications of which JWS is a part are designed to be easy to implement, a feature that is also one of wheel's primary design goals. JWS yields a useful, concise pure-Python implementation.
Why does wheel also allow S/MIME signatures?

S/MIME signatures are allowed for users who need or want to use existing public key infrastructure with wheel.

Signed packages are only a basic building block in a secure package update system. Wheel only provides the building block.

What's the deal with "purelib" vs. "platlib"?

Wheel preserves the "purelib" vs. "platlib" distinction, which is significant on some platforms. For example, Fedora installs pure Python packages to '/usr/lib/pythonX.Y/site-packages' and platform dependent packages to '/usr/lib64/pythonX.Y/site-packages'.

A wheel with "Root-Is-Purelib: false" with all its files in {name}-{version}.data/purelib is equivalent to a wheel with "Root-Is-Purelib: true" with those same files in the root, and it is legal to have files in both the "purelib" and "platlib" categories.

In practice a wheel should have only one of "purelib" or "platlib" depending on whether it is pure Python or not and those files should be at the root with the appropriate setting given for "Root-is-purelib".

Is it possible to import Python code directly from a wheel file?

Technically, due to the combination of supporting installation via simple extraction and using an archive format that is compatible with zipimport, a subset of wheel files do support being placed directly on sys.path. However, while this behaviour is a natural consequence of the format design, actually relying on it is generally discouraged.

Firstly, wheel is designed primarily as a distribution format, so skipping the installation step also means deliberately avoiding any reliance on features that assume full installation (such as being able to use standard tools like pip and virtualenv to capture and manage dependencies in a way that can be properly tracked for auditing and security update purposes, or integrating fully with the standard build machinery for C extensions by publishing header files in the appropriate place).

Secondly, while some Python software is written to support running directly from a zip archive, it is still common for code to be written assuming it has been fully installed. When that assumption is broken by trying to run the software from a zip archive, the failures can often be obscure and hard to diagnose (especially when they occur in third party libraries). The two most common sources of problems with this are the fact that importing C extensions from a zip archive is not supported by CPython (since doing so is not supported directly by the dynamic loading machinery on any platform) and that when running from a zip archive the __file__ attribute no longer refers to an ordinary filesystem path, but to a combination path that includes both the location of the zip archive on the filesystem and the relative path to the module inside the archive. Even when software correctly uses the abstract resource APIs internally, interfacing with external components may still require the availability of an actual on-disk file.

Like metaclasses, monkeypatching and metapath importers, if you're not already sure you need to take advantage of this feature, you almost certainly don't need it. If you do decide to use it anyway, be aware that many projects will require a failure to be reproduced with a fully installed package before accepting it as a genuine bug.
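For completeness, the discouraged pattern described above looks like this in practice (a sketch; the filename is the example wheel used throughout this PEP, and the import works only for the pure-Python subset of wheels):

    import sys

    # Deliberately skipping installation -- see the caveats above.
    sys.path.insert(0, 'distribution-1.0-py32-none-any.whl')
    import distribution  # loaded via zipimport, straight from the archive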
Appendix

Example urlsafe-base64-nopad implementation:

    # urlsafe-base64-nopad for Python 3
    import base64

    def urlsafe_b64encode_nopad(data):
        return base64.urlsafe_b64encode(data).rstrip(b'=')

    def urlsafe_b64decode_nopad(data):
        pad = b'=' * (4 - (len(data) & 3))
        return base64.urlsafe_b64decode(data + pad)

Copyright

This document has been placed into the public domain.
PEP: 593
Title: Flexible function and variable annotations
Author: Till Varoquaux <[email protected]>, Konstantin Kashin <[email protected]>
Sponsor: Ivan Levkivskyi <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Typing
Created: 26-Apr-2019
Python-Version: 3.9
Post-History: 20-May-2019

Abstract

This PEP introduces a mechanism to extend the type annotations from PEP 484 with arbitrary metadata.

Motivation

PEP 484 provides a standard semantic for the annotations introduced in PEP 3107. PEP 484 is prescriptive but it is the de facto standard for most of the consumers of annotations; in many statically checked code bases, where type annotations are widely used, they have effectively crowded out any other form of annotation. Some of the use cases for annotations described in PEP 3107 (database mapping, foreign languages bridge) are not currently realistic given the prevalence of type annotations. Furthermore, the standardisation of type annotations rules out advanced features only supported by specific type checkers.

Rationale

This PEP adds an Annotated type to the typing module to decorate existing types with context-specific metadata. Specifically, a type T can be annotated with metadata x via the typehint Annotated[T, x]. This metadata can be used for either static analysis or at runtime. If a library (or tool) encounters a typehint Annotated[T, x] and has no special logic for metadata x, it should ignore it and simply treat the type as T. Unlike the no_type_check functionality that currently exists in the typing module which completely disables typechecking annotations on a function or a class, the Annotated type allows for both static typechecking of T (e.g., via mypy or Pyre, which can safely ignore x) together with runtime access to x within a specific application.

The introduction of this type would address a diverse set of use cases of interest to the broader Python community. This was originally brought up as issue 600 in the typing github and then discussed in Python ideas.

Motivating examples

Combining runtime and static uses of annotations

There's an emerging trend of libraries leveraging the typing annotations at runtime (e.g.: dataclasses); having the ability to extend the typing annotations with external data would be a great boon for those libraries.

Here's an example of how a hypothetical module could leverage annotations to read C structs:

    UnsignedShort = Annotated[int, struct2.ctype('H')]
    SignedChar = Annotated[int, struct2.ctype('b')]

    class Student(struct2.Packed):
        # mypy typechecks 'name' field as 'str'
        name: Annotated[str, struct2.ctype("<10s")]
        serialnum: UnsignedShort
        school: SignedChar

    # 'unpack' only uses the metadata within the type annotations
    Student.unpack(record)
    # Student(name=b'raymond   ', serialnum=4658, school=264)

Lowering barriers to developing new typing constructs

Typically when adding a new type, a developer needs to upstream that type to the typing module and change mypy, PyCharm, Pyre, pytype, etc. This is particularly important when working on open-source code that makes use of these types, seeing as the code would not be immediately transportable to other developers' tools without additional logic. As a result, there is a high cost to developing and trying out new types in a codebase.
Ideally, authors should be able to introduce new types in a manner that allows for graceful degradation (e.g.: when clients do not have a custom mypy plugin), which would lower the barrier to development and ensure some degree of backward compatibility.

For example, suppose that an author wanted to add support for tagged unions to Python. One way to accomplish this would be to annotate TypedDict in Python such that only one field is allowed to be set:

    Currency = Annotated[
        TypedDict('Currency', {'dollars': float, 'pounds': float}, total=False),
        TaggedUnion,
    ]

This is a somewhat cumbersome syntax but it allows us to iterate on this proof-of-concept and have people with type checkers (or other tools) that don't yet support this feature work in a codebase with tagged unions. The author could easily test this proposal and iron out the kinks before trying to upstream tagged union to typing, mypy, etc. Moreover, tools that do not have support for parsing the TaggedUnion annotation would still be able to treat Currency as a TypedDict, which is still a close approximation (slightly less strict).

Specification

Syntax

Annotated is parameterized with a type and an arbitrary list of Python values that represent the annotations. Here are the specific details of the syntax:

- The first argument to Annotated must be a valid type
- Multiple type annotations are supported (Annotated supports variadic arguments):

      Annotated[int, ValueRange(3, 10), ctype("char")]

- Annotated must be called with at least two arguments (Annotated[int] is not valid)
- The order of the annotations is preserved and matters for equality checks:

      Annotated[int, ValueRange(3, 10), ctype("char")] != Annotated[
          int, ctype("char"), ValueRange(3, 10)
      ]

- Nested Annotated types are flattened, with metadata ordered starting with the innermost annotation:

      Annotated[Annotated[int, ValueRange(3, 10)], ctype("char")] == Annotated[
          int, ValueRange(3, 10), ctype("char")
      ]

- Duplicated annotations are not removed:

      Annotated[int, ValueRange(3, 10)] != Annotated[
          int, ValueRange(3, 10), ValueRange(3, 10)
      ]

- Annotated can be used with nested and generic aliases:

      T = TypeVar('T')
      Vec = Annotated[List[Tuple[T, T]], MaxLen(10)]
      V = Vec[int]

      V == Annotated[List[Tuple[int, int]], MaxLen(10)]

Consuming annotations

Ultimately, the responsibility of how to interpret the annotations (if at all) is the responsibility of the tool or library encountering the Annotated type. A tool or library encountering an Annotated type can scan through the annotations to determine if they are of interest (e.g., using isinstance()).

Unknown annotations: When a tool or a library does not support annotations or encounters an unknown annotation it should just ignore it and treat the annotated type as the underlying type. For example, when encountering an annotation that is not an instance of struct2.ctype in the annotations for name (e.g., Annotated[str, 'foo', struct2.ctype("<10s")]), the unpack method should ignore it.

Namespacing annotations: Namespaces are not needed for annotations since the class used by the annotations acts as a namespace.

Multiple annotations: It's up to the tool consuming the annotations to decide whether the client is allowed to have several annotations on one type and how to merge those annotations. Since the Annotated type allows you to put several annotations of the same (or different) type(s) on any node, the tools or libraries consuming those annotations are in charge of dealing with potential duplicates. For example, if you are doing value range analysis you might allow this:

    T1 = Annotated[int, ValueRange(-10, 5)]
    T2 = Annotated[T1, ValueRange(-20, 3)]

Flattening nested annotations, this translates to:

    T2 = Annotated[int, ValueRange(-10, 5), ValueRange(-20, 3)]
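As a hedged sketch of the consumption pattern described above (MaxLen is the invented marker class from the syntax examples, not a typing construct):

    from typing import Annotated, List, get_args, get_origin

    class MaxLen:
        def __init__(self, n):
            self.n = n

    def check_max_len(value, hint):
        # Scan the metadata, honouring MaxLen and ignoring anything unknown.
        if get_origin(hint) is Annotated:
            _base, *metadata = get_args(hint)
            for item in metadata:
                if isinstance(item, MaxLen) and len(value) > item.n:
                    raise ValueError(f'expected at most {item.n} items')

    check_max_len([1, 2, 3], Annotated[List[int], MaxLen(10)])  # ok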
Interaction with get_type_hints()

typing.get_type_hints() will take a new argument include_extras that defaults to False to preserve backward compatibility. When include_extras is False, the extra annotations will be stripped out of the returned value. Otherwise, the annotations will be returned unchanged:

    @struct2.packed
    class Student(NamedTuple):
        name: Annotated[str, struct.ctype("<10s")]

    get_type_hints(Student) == {'name': str}
    get_type_hints(Student, include_extras=False) == {'name': str}
    get_type_hints(Student, include_extras=True) == {
        'name': Annotated[str, struct.ctype("<10s")]
    }

Aliases & Concerns over verbosity

Writing typing.Annotated everywhere can be quite verbose; fortunately, the ability to alias annotations means that in practice we don't expect clients to have to write lots of boilerplate code:

    T = TypeVar('T')
    Const = Annotated[T, my_annotations.CONST]

    class C:
        def const_method(self: Const[List[int]]) -> int:
            ...

Rejected ideas

Some of the proposed ideas were rejected from this PEP because they would cause Annotated to not integrate cleanly with the other typing annotations:

- Annotated cannot infer the decorated type. You could imagine that Annotated[..., Immutable] could be used to mark a value as immutable while still inferring its type. Typing does not support using the inferred type anywhere else; it's best to not add this as a special case.
- Using (Type, Ann1, Ann2, ...) instead of Annotated[Type, Ann1, Ann2, ...]. This would cause confusion when annotations appear in nested positions (Callable[[A, B], C] is too similar to Callable[[(A, B)], C]) and would make it impossible for constructors to be passthrough (T(5) == C(5) when C = Annotation[T, Ann]).

This feature was left out to keep the design simple:

- Annotated cannot be called with a single argument. Annotated could support returning the underlying value when called with a single argument (e.g.: Annotated[int] == int). This complicates the specifications and adds little benefit.

Copyright

This document has been placed in the public domain.
PEP: 621
Title: Storing project metadata in pyproject.toml
Author: Brett Cannon <[email protected]>, Dustin Ingram <[email protected]>, Paul Ganssle <paul at ganssle.io>, Pradyun Gedam <[email protected]>, Sébastien Eustace <[email protected]>, Thomas Kluyver <[email protected]>, Tzu-ping Chung <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-621-round-3/5472
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 22-Jun-2020
Post-History: 22-Jun-2020, 18-Oct-2020, 24-Oct-2020, 31-Oct-2020
Resolution: https://discuss.python.org/t/pep-621-round-3/5472/109

Abstract

This PEP specifies how to write a project's core metadata in a pyproject.toml file for packaging-related tools to consume.

Motivation

The key motivators of this PEP are:

- Encourage users to specify core metadata statically for speed, ease of specification, unambiguity, and deterministic consumption by build back-ends
- Provide a tool-agnostic way of specifying metadata for ease of learning and transitioning between build back-ends
- Allow for more code sharing between build back-ends for the "boring parts" of a project's metadata

To speak specifically to the motivation for static metadata, that has been an overall goal of the packaging ecosystem for some time. As such, making it easy to specify metadata statically is important. This also means that raising the cost of specifying data as dynamic is acceptable as users should skew towards wanting to provide static metadata.

Requiring the distinction between static and dynamic metadata also helps with disambiguation for when metadata isn't specified. When any metadata may be dynamic, it means you never know if the absence of metadata is on purpose or because it is to be provided later. By requiring that dynamic metadata be specified, it disambiguates the intent when metadata goes unspecified.

This PEP does not attempt to standardize all possible metadata required by a build back-end, only the metadata covered by the core metadata specification which are very common across projects and would stand to benefit from being static and consistently specified. This means build back-ends are still free and able to innovate around patterns like how to specify the files to include in a wheel. There is also an included escape hatch for users and build back-ends to use when they choose to partially opt-out of this PEP (compared to opting-out of this PEP entirely, which is also possible).

This PEP is also not trying to change the underlying core metadata in any way. Such considerations should be done in a separate PEP which may lead to changes or additions to what this PEP specifies.
Rationale

The design guidelines the authors of this PEP followed were:

- Define a representation of as much of the core metadata in pyproject.toml as is reasonable
- Define the metadata statically with an escape hatch for those who want to define it dynamically later via a build back-end
- Use familiar names where it makes sense, but be willing to use more modern terminology
- Try to be ergonomic within a TOML file instead of mirroring how build back-ends specify metadata at a low-level when it makes sense
- Learn from other build back-ends in the packaging ecosystem which have used TOML for their metadata
- Don't try to standardize things which lack a pre-existing standard at a lower-level
- When metadata is specified using this PEP, it is considered canonical

Specification

When specifying project metadata, tools MUST adhere and honour the metadata as specified in this PEP. If metadata is improperly specified then tools MUST raise an error to notify the user about their mistake.

Data specified using this PEP is considered canonical. Tools CANNOT remove, add or change data that has been statically specified. Only when a field is marked as dynamic may a tool provide a "new" value.

Details

Table name

Tools MUST specify fields defined by this PEP in a table named [project]. No tools may add fields to this table which are not defined by this PEP or subsequent PEPs. For tools wishing to store their own settings in pyproject.toml, they may use the [tool] table as defined in PEP 518. The lack of a [project] table implicitly means the build back-end will dynamically provide all fields.

name

- Format: string
- Core metadata: Name (link)
- Synonyms
  - Flit: module/dist-name (link)
  - Poetry: name (link)
  - Setuptools: name (link)

The name of the project.

Tools MUST require users to statically define this field.

Tools SHOULD normalize this name, as specified by PEP 503, as soon as it is read for internal consistency.
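For concreteness, the PEP 503 normalization referenced above is a single substitution; a sketch:

    import re

    def normalize(name):
        # PEP 503: runs of -, _ and . collapse to a single -, lowercased.
        return re.sub(r'[-_.]+', '-', name).lower()

    # normalize('Sphinx_RTD.Theme') -> 'sphinx-rtd-theme'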
version

- Format: string
- Core metadata: Version (link)
- Synonyms
  - Flit: N/A (read from a __version__ attribute) (link)
  - Poetry: version (link)
  - Setuptools: version (link)

The version of the project as supported by PEP 440.

Users SHOULD prefer to specify already-normalized versions.

description

- Format: string
- Core metadata: Summary (link)
- Synonyms
  - Flit: N/A
  - Poetry: description (link)
  - Setuptools: description (link)

The summary description of the project.

readme

- Format: String or table
- Core metadata: Description (link)
- Synonyms
  - Flit: description-file (link)
  - Poetry: readme (link)
  - Setuptools: long_description (link)

The full description of the project (i.e. the README).

The field accepts either a string or a table. If it is a string then it is the relative path to a text file containing the full description. Tools MUST assume the file's encoding is UTF-8. If the file path ends in a case-insensitive .md suffix, then tools MUST assume the content-type is text/markdown. If the file path ends in a case-insensitive .rst, then tools MUST assume the content-type is text/x-rst. If a tool recognizes more extensions than this PEP, they MAY infer the content-type for the user without specifying this field as dynamic. For all unrecognized suffixes when a content-type is not provided, tools MUST raise an error.

The readme field may also take a table. The file key has a string value representing a relative path to a file containing the full description. The text key has a string value which is the full description. These keys are mutually-exclusive, thus tools MUST raise an error if the metadata specifies both keys.

A table specified in the readme field also has a content-type field which takes a string specifying the content-type of the full description. A tool MUST raise an error if the metadata does not specify this field in the table. If the metadata does not specify the charset parameter, then it is assumed to be UTF-8. Tools MAY support other encodings if they choose to. Tools MAY support alternative content-types which they can transform to a content-type as supported by the core metadata. Otherwise tools MUST raise an error for unsupported content-types.

requires-python

- Format: string
- Core metadata: Requires-Python (link)
- Synonyms
  - Flit: requires-python (link)
  - Poetry: As a python dependency in the [tool.poetry.dependencies] table (link)
  - Setuptools: python_requires (link)

The Python version requirements of the project.

license

- Format: Table
- Core metadata: License (link)
- Synonyms
  - Flit: license (link)
  - Poetry: license (link)
  - Setuptools: license, license_file, license_files (link)

The table may have one of two keys. The file key has a string value that is a relative file path to the file which contains the license for the project. Tools MUST assume the file's encoding is UTF-8. The text key has a string value which is the license of the project whose meaning is that of the License field from the core metadata. These keys are mutually exclusive, so a tool MUST raise an error if the metadata specifies both keys.

A practical string value for the license key has been purposefully left out to allow for a future PEP to specify support for SPDX expressions (the same logic applies to any sort of "type" field specifying what license the file or text represents).

authors/maintainers

- Format: Array of inline tables with string keys and values
- Core metadata: Author/Author-email/Maintainer/Maintainer-email (link)
- Synonyms
  - Flit: author/author-email/maintainer/maintainer-email (link)
  - Poetry: authors/maintainers (link)
  - Setuptools: author/author_email/maintainer/maintainer_email (link)

The people or organizations considered to be the "authors" of the project. The exact meaning is open to interpretation: it may list the original or primary authors, current maintainers, or owners of the package. The "maintainers" field is similar to "authors" in that its exact meaning is open to interpretation.

These fields accept an array of tables with 2 keys: name and email. Both values must be strings. The name value MUST be a valid email name (i.e. whatever can be put as a name, before an email, in RFC 822) and not contain commas. The email value MUST be a valid email address. Both keys are optional.

Using the data to fill in core metadata is as follows:

1. If only name is provided, the value goes in Author/Maintainer as appropriate.
2. If only email is provided, the value goes in Author-email/Maintainer-email as appropriate.
3. If both email and name are provided, the value goes in Author-email/Maintainer-email as appropriate, with the format {name} <{email}> (with appropriate quoting, e.g. using email.headerregistry.Address).
4. Multiple values should be separated by commas.
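For example, rule 3's formatting-with-quoting can be done with the email.headerregistry.Address class mentioned above (the name and address here are invented for this sketch):

    from email.headerregistry import Address

    # Combine name and email; Address adds quoting when the display
    # name contains characters that require it.
    author = Address(display_name='Jane Doe', addr_spec='jane@example.com')
    print(str(author))  # Jane Doe <jane@example.com>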
classifiers

- Format: array of strings
- Core metadata: Classifier (link)
- Synonyms
  - Flit: classifiers (link)
  - Poetry: classifiers (link)
  - Setuptools: classifiers (link)

Trove classifiers which apply to the project.

urls

- Format: table, with keys and values of strings
- Core metadata: Project-URL (link)
- Synonyms
  - Flit: [tool.flit.metadata.urls] table (link)
  - Poetry: [tool.poetry.urls] table (link)
  - Setuptools: project_urls (link)

A table of URLs where the key is the URL label and the value is the URL itself.

Entry points

- Format: table ([project.scripts], [project.gui-scripts], and [project.entry-points])
- Core metadata: N/A; entry points specification
- Synonyms
  - Flit: [tool.flit.scripts] table for console scripts, [tool.flit.entrypoints] for the rest (link)
  - Poetry: [tool.poetry.scripts] table for console scripts (link)
  - Setuptools: entry_points (link)

There are three tables related to entry points. The [project.scripts] table corresponds to the console_scripts group in the entry points specification. The key of the table is the name of the entry point and the value is the object reference.

The [project.gui-scripts] table corresponds to the gui_scripts group in the entry points specification. Its format is the same as [project.scripts].

The [project.entry-points] table is a collection of tables. Each sub-table's name is an entry point group. The key and value semantics are the same as [project.scripts]. Users MUST NOT create nested sub-tables but instead keep the entry point groups to only one level deep. Build back-ends MUST raise an error if the metadata defines a [project.entry-points.console_scripts] or [project.entry-points.gui_scripts] table, as they would be ambiguous in the face of [project.scripts] and [project.gui-scripts], respectively. (A combined sketch of these tables appears after the dependency fields below.)

dependencies/optional-dependencies

- Format: array of PEP 508 strings (dependencies) and a table with values of arrays of PEP 508 strings (optional-dependencies)
- Core metadata: Requires-Dist and Provides-Extra (link, link)
- Synonyms
  - Flit: requires for required dependencies, requires-extra for optional dependencies (link)
  - Poetry: [tool.poetry.dependencies] for dependencies (both required and for development), [tool.poetry.extras] for optional dependencies (link)
  - Setuptools: install_requires for required dependencies, extras_require for optional dependencies (link)

The (optional) dependencies of the project.

For dependencies, it is a key whose value is an array of strings. Each string represents a dependency of the project and MUST be formatted as a valid PEP 508 string. Each string maps directly to a Requires-Dist entry in the core metadata.

For optional-dependencies, it is a table where each key specifies an extra and whose value is an array of strings. The strings of the arrays must be valid PEP 508 strings. The keys MUST be valid values for the Provides-Extra core metadata. Each value in the array thus becomes a corresponding Requires-Dist entry for the matching Provides-Extra metadata.
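The following hypothetical fragment sketches the entry point tables and both dependency fields together; the package, module, and extra names are invented:

    [project]
    name = "my-package"
    version = "1.0.0"
    dependencies = [
        "requests>=2.20",
        "attrs; python_version < '3.8'"   # environment marker per PEP 508
    ]

    [project.optional-dependencies]
    test = ["pytest"]          # installed via my-package[test]

    [project.scripts]
    my-cli = "my_package.cli:main"        # console_scripts group

    [project.gui-scripts]
    my-gui = "my_package.gui:main"        # gui_scripts group

    # Groups are one level deep only; quote dotted group names.
    [project.entry-points."my_package.plugins"]
    default = "my_package.plugins:Default"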
dynamic

- Format: array of strings
- Core metadata: N/A
- No synonyms

Specifies which fields listed by this PEP were intentionally unspecified so another tool can/will provide such metadata dynamically. This clearly delineates which metadata is purposefully unspecified and expected to stay unspecified, compared to being provided via tooling later on.

- A build back-end MUST honour statically-specified metadata (which means the metadata did not list the field in dynamic).
- A build back-end MUST raise an error if the metadata lists the name field in dynamic.
- If the core metadata specification lists a field as "Required", then the metadata MUST specify the field statically or list it in dynamic (build back-ends MUST raise an error otherwise, i.e. it should not be possible for a required field to not be listed somehow in the [project] table).
- If the core metadata specification lists a field as "Optional", the metadata MAY list it in dynamic if the expectation is a build back-end will provide the data for the field later.
- Build back-ends MUST raise an error if the metadata specifies a field statically as well as being listed in dynamic.
- If the metadata does not list a field in dynamic, then a build back-end CANNOT fill in the requisite metadata on behalf of the user (i.e. dynamic is the only way to allow a tool to fill in metadata and the user must opt into the filling in).
- Build back-ends MUST raise an error if the metadata specifies a field in dynamic but the build back-end was unable to provide the data for it.
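For instance, a hypothetical project that delegates its version to the build back-end (e.g. computing it from a VCS tag) might write:

    [project]
    name = "my-package"
    # No static "version" key: the back-end must supply it, and it is an
    # error if the back-end cannot provide the data.
    dynamic = ["version"]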
Example

    [project]
    name = "spam"
    version = "2020.0.0"
    description = "Lovely Spam! Wonderful Spam!"
    readme = "README.rst"
    requires-python = ">=3.8"
    license = {file = "LICENSE.txt"}
    keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"]
    authors = [
        {email = "[email protected]"},
        {name = "Tzu-ping Chung"}
    ]
    maintainers = [
        {name = "Brett Cannon", email = "[email protected]"}
    ]
    classifiers = [
        "Development Status :: 4 - Beta",
        "Programming Language :: Python"
    ]
    dependencies = [
        "httpx",
        "gidgethub[httpx]>4.0.0",
        "django>2.1; os_name != 'nt'",
        "django>2.0; os_name == 'nt'"
    ]

    [project.optional-dependencies]
    test = [
        "pytest < 5.0.0",
        "pytest-cov[all]"
    ]

    [project.urls]
    homepage = "https://example.com"
    documentation = "https://readthedocs.org"
    repository = "https://github.com"
    changelog = "https://github.com/me/spam/blob/master/CHANGELOG.md"

    [project.scripts]
    spam-cli = "spam:main_cli"

    [project.gui-scripts]
    spam-gui = "spam:main_gui"

    [project.entry-points."spam.magical"]
    tomatoes = "spam:main_tomatoes"

Backwards Compatibility

As this provides a new way to specify a project's core metadata and is using a new table name which falls under the reserved namespace as outlined in PEP 518, there are no backwards-compatibility concerns.

Security Implications

There are no direct security concerns as this PEP covers how to statically define project metadata. Any security issues would stem from how tools consume the metadata and choose to act upon it.

Reference Implementation

There are currently no proofs-of-concept from any build back-end implementing this PEP.

Rejected Ideas

Other table names

Anything under [build-system]: there was worry that using this table name would exacerbate confusion between build metadata and project metadata, e.g. by using [build-system.metadata] as a table.

[package]: garnered no strong support.

[metadata]: the strongest contender after [project], but in the end it was agreed that [project] read better for certain sub-tables, e.g. [project.urls].

Support for a metadata provider

Initially there was a proposal to add a middle layer between the static metadata specified by this PEP and prepare_metadata_for_build_wheel() as specified by PEP 517. The idea was that if a project wanted to insert itself between a build back-end and the metadata there would be a hook to do so. In the end, the authors considered this idea unnecessarily complicated, and felt it would move the PEP away from its design goal of pushing people to define core metadata statically as much as possible.

Require a normalized project name

While it would make things easier for tools to only work with the normalized name as specified in PEP 503, the idea was ultimately rejected as it would hurt projects transitioning to using this PEP.

Specify files to include when building

The authors decided fairly quickly during design discussions that this PEP should focus exclusively on project metadata and not build metadata. As such, specifying what files should end up in a source distribution or wheel file is out of scope for this PEP.

Name the [project.urls] table [project.project-urls]

This suggestion came thanks to the corresponding core metadata field being Project-URL. But once the overall table name of [project] was chosen, the redundant use of the word "project" suggested the current, shorter name was a better fit.

Have a separate url/home-page field

While the core metadata supports it, having a single field for a project's URL while also supporting a full table seemed redundant and confusing.

Recommend that tools put development-related dependencies into a "dev" extra

As various tools have grown the concept of required dependencies versus development dependencies, the idea of suggesting to tools that they put such development tools into a "dev" grouping came up. In the end, though, the authors deemed it out of scope for this specification to suggest such a workflow.

Have the dynamic field only require specifying missing required fields

The authors considered the idea that the dynamic field would only require the listing of missing required fields and make listing optional fields optional. In the end, though, this went against the design goal of promoting specifying as much information statically as possible.

Different structures for the readme field

The readme field had a proposed readme_content_type field, but the authors considered the string/table hybrid more practical for the common case while still accommodating the more complex case. The same goes for using long_description and a corresponding long_description_content_type field. The file key in the table format was originally proposed as path, but file corresponds to setuptools' file key and there is no strong reason otherwise to choose one over the other.

Allowing the readme field to imply text/plain

The authors considered allowing for unspecified content-types which would default to text/plain, but decided that it would be best to be explicit in this case to prevent accidental incorrect renderings on PyPI and to force users to be clear in their intent.

Other names for dependencies/optional-dependencies

The authors originally proposed requires/extra-requires as names, but decided to go with the current names after a survey of other packaging ecosystems showed Python was an outlier:

1. npm
2. Rust
3. Dart
4. Swift
5. Ruby

Normalizing on the current names helps minimize confusion for people coming from other ecosystems without using terminology that is necessarily foreign to new programmers. It also prevents potential confusion with requires in the [build-system] table as specified in PEP 518.

Drop maintainers to unify with authors

As the difference between the Authors and Maintainers fields in the core metadata is unspecified and ambiguous, this PEP originally proposed unifying them as a single authors field.
Other ecosystems have selected "author" as the term to use, so the thinking was to standardize on Author in the core metadata as the place to list people maintaining a project. In the end, though, the decision to adhere to the core metadata was deemed more important to help with the acceptance of this PEP, rather than trying to introduce a new interpretation for some of the core metadata.

Support an arbitrary depth of tables for project.entry-points

There was a worry that keeping project.entry-points to a depth of 1 for sub-tables would cause confusion to users if they use a dotted name and are not used to table names using quotation marks (e.g. project.entry-points."spam.magical"). But supporting an arbitrary depth -- e.g. project.entry-points.spam.magical -- would preclude any form of an exploded table format in the future. It would also complicate things for build back-ends, as they would have to make sure to traverse the full table structure rather than a single level and raise errors as appropriate on value types.

Using structured TOML dictionaries to specify dependencies

The format for specifying the dependencies of a project was the most hotly contested topic in terms of data format. It led to the creation of both PEP 631 and PEP 633, which represent what is in this PEP and using TOML dictionaries more extensively, respectively. The decision on those PEPs can be found at https://discuss.python.org/t/how-to-specify-dependencies-pep-508-strings-or-a-table-in-toml/5243/38. The authors briefly considered supporting both formats, but decided that it would lead to confusion as people would need to be familiar with two formats instead of just one.

Require build back-ends to update pyproject.toml when generating an sdist

When this PEP was written, sdists did not require having static, canonical metadata like this PEP does. The idea was then considered to use this PEP as a way to get such metadata into sdists. In the end, though, the idea of updating pyproject.toml was not generally liked, and so the idea was rejected in favour of separately pursuing standardizing metadata in sdists.

Allow tools to add/extend data

In an earlier version of this PEP, tools were allowed to extend data for fields. For instance, build back-ends could take the version number and add a local version for when they built the wheel. Tools could also add more trove classifiers for things like the license or supported Python versions. In the end, though, it was thought better to start out stricter and contemplate loosening how static the data could be considered based on real-world usage.

Open Issues

None at the moment.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 637
Title: Support for indexing with keyword arguments
Version: $Revision$
Last-Modified: $Date$
Author: Stefano Borini
Sponsor: Steven D'Aprano
Discussions-To: [email protected]
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 24-Aug-2020
Python-Version: 3.10
Post-History: 23-Sep-2020
Resolution: https://mail.python.org/archives/list/[email protected]/thread/6TAQ2BEVSJNV4JM2RJYSSYFJUT3INGZD/

Note

This PEP has been rejected. In general, the cost of introducing new syntax was not outweighed by the perceived benefits. See the link in the Resolution header field for details.

Abstract

At present keyword arguments are allowed in function calls, but not in item access. This PEP proposes that Python be extended to allow keyword arguments in item access. The following example shows keyword arguments for ordinary function calls:

    >>> val = f(1, 2, a=3, b=4)

The proposal would extend the syntax to allow a similar construct for indexing operations:

    >>> val = x[1, 2, a=3, b=4]  # getitem
    >>> x[1, 2, a=3, b=4] = val  # setitem
    >>> del x[1, 2, a=3, b=4]    # delitem

and would also provide appropriate semantics. Single- and double-star unpacking of arguments is also provided:

    >>> val = x[*(1, 2), **{"a": 3, "b": 4}]  # Equivalent to above.

This PEP is a successor to PEP 472, which was rejected due to lack of interest in 2019. Since then there's been renewed interest in the feature.

Overview

Background

PEP 472 was opened in 2014. The PEP detailed various use cases and was created by extracting implementation strategies from a broad discussion on the python-ideas mailing list, although no clear consensus was reached on which strategy should be used. Many corner cases were examined more closely and felt awkward, backward incompatible, or both. The PEP was eventually rejected in 2019[1], mostly due to lack of interest in the feature despite its 5 years of existence.

However, with the introduction of type hints in PEP 484 the square bracket notation has been used consistently to enrich the typing annotations, e.g. to specify a list of integers as Sequence[int]. Additionally, there has been an expanded growth of packages for data analysis such as pandas and xarray, which use names to describe columns in a table (pandas) or axes in an nd-array (xarray). These packages allow users to access specific data by names, but cannot currently use index notation ([]) for this functionality.

As a result, a renewed interest in a more flexible syntax that would allow for named information has been expressed occasionally in many different threads on python-ideas, recently by Caleb Donovick[2] in 2019 and Andras Tantos[3] in 2020. These requests prompted strong activity on the python-ideas mailing list, where the various options have been re-discussed and a general consensus on an implementation strategy has now been reached.

Use cases

The following practical use cases present different cases where a keyword specification would improve notation and provide additional value:

1. To provide a more communicative meaning to the index, preventing e.g. accidental inversion of indexes:

    >>> grid_position[x=3, y=5, z=8]
    >>> rain_amount[time=0:12, location=location]
    >>> matrix[row=20, col=40]

2. To enrich the typing notation with keywords, especially during the use of generics:

    def function(value: MyType[T=int]):
3. In some domains, such as computational physics and chemistry, a notation such as Basis[Z=5] is a domain-specific-language notation used to represent a level of accuracy:

    >>> low_accuracy_energy = computeEnergy(molecule, BasisSet[Z=3])

4. Pandas currently uses a notation such as:

    >>> df[df['x'] == 1]

which could be replaced with df[x=1].

5. xarray has named dimensions. Currently these are handled with functions such as .isel:

    >>> data.isel(row=10)  # Returns the row at index 10

which could also be replaced with data[row=10]. A more complex example:

    >>> # old syntax
    >>> da.isel(space=0, time=slice(None, 2))[...] = spam
    >>> # new syntax
    >>> da[space=0, time=:2] = spam

Another example:

    >>> # old syntax
    >>> ds["empty"].loc[dict(lon=5, lat=6)] = 10
    >>> # new syntax
    >>> ds["empty"][lon=5, lat=6] = 10

    >>> # old syntax
    >>> ds["empty"].loc[dict(lon=slice(1, 5), lat=slice(3, None))] = 10
    >>> # new syntax
    >>> ds["empty"][lon=1:5, lat=3:] = 10

6. Functions/methods whose argument is another function (plus its arguments) need some way to determine which arguments are destined for the target function, and which are used to configure how they run the target. This is simple (if non-extensible) for positional parameters, but we need some way to distinguish these for keywords.[4] An indexed notation would afford a Pythonic way to pass keyword arguments to these functions without cluttering the caller's code.

    >>> # Let's start this example with basic syntax without keywords.
    >>> # The positional values are arguments to `func` while
    >>> # `name=` is processed by `trio.run`.
    >>> trio.run(func, value1, value2, name="func")
    >>> # `trio.run` ends up calling `func(value1, value2)`.
    >>> # If we want/need to pass value2 by keyword (keyword-only argument,
    >>> # additional arguments that won't break backwards compatibility ...),
    >>> # currently we need to resort to functools.partial:
    >>> trio.run(functools.partial(func, param2=value2), value1, name="func")
    >>> trio.run(functools.partial(func, value1, param2=value2), name="func")
    >>> # One possible workaround is to convert `trio.run` to an object
    >>> # with a `__call__` method, and use an "option" helper:
    >>> trio.run.option(name="func")(func, value1, param2=value2)
    >>> # However, foo(bar)(baz) is uncommon and thus disruptive to the reader.
    >>> # Also, you need to remember the name of the `option` method.
    >>> # This PEP allows us to replace `option` with `__getitem__`.
    >>> # The call is now shorter, more mnemonic, and looks+works like typing:
    >>> trio.run[name="func"](func, value1, param2=value2)

7. Availability of star arguments would benefit PEP 646 Variadic Generics, especially in the forms a[*x] and a[*x, *y, p, q, *z]. That PEP details exactly this notation in its "Unpacking: Star Operator" section.

It is important to note that how the notation is interpreted is up to the implementation. This PEP only defines and dictates the behavior of Python regarding passed keyword arguments, not how these arguments should be interpreted and used by the implementing class.

Current status of the indexing operation

Before detailing the new syntax and semantics of the indexing notation, it is relevant to analyse how the indexing notation works today, in which contexts, and how it is different from a function call.

Subscripting obj[x] is, effectively, an alternate and specialised form of function call syntax with a number of differences and restrictions compared to obj(x).
The current Python syntax focuses exclusively on position to express the index, and also contains syntactic sugar to refer to non-punctiform selection (slices). Some common examples:

    >>> a[3]       # returns the fourth element of 'a'
    >>> a[1:10:2]  # slice notation (extract a non-trivial data subset)
    >>> a[3, 2]    # multiple indexes (for multidimensional arrays)

This translates into a __(get|set|del)item__ dunder call which is passed a single parameter containing the index (for __getitem__ and __delitem__) or two parameters containing index and value (for __setitem__).

The behavior of the indexing call is fundamentally different from a function call in various aspects.

The first difference is in meaning to the reader. A function call says "arbitrary function call potentially with side-effects". An indexing operation says "lookup", typically to point at a subset or specific sub-aspect of an entity (as in the case of typing notation). This fundamental difference means that, while we cannot prevent abuse, implementors should be aware that the introduction of keyword arguments to alter the behavior of the lookup may violate this intrinsic meaning.

The second difference of the indexing notation compared to a function is that indexing can be used for both getting and setting operations. In Python, a function cannot be on the left-hand side of an assignment. In other words, both of these are valid:

    >>> x = a[1, 2]
    >>> a[1, 2] = 5

but only the first one of these is valid:

    >>> x = f(1, 2)
    >>> f(1, 2) = 5  # invalid

This asymmetry is important, and makes one understand that there is a natural imbalance between the two forms. It is therefore not a given that the two should behave transparently and symmetrically.

The third difference is that functions have names assigned to their arguments, unless the passed parameters are captured with *args, in which case they end up as entries in the args tuple. In other words, functions already have anonymous argument semantics (via *args), exactly like the indexing operation. However, __(get|set|del)item__ does not always receive a tuple as the index argument (which would be uniform in behavior with *args). In fact, given a trivial class:

    class X:
        def __getitem__(self, index):
            print(index)

the index operation basically forwards the content of the square brackets "as is" in the index argument:

    >>> x = X()
    >>> x[0]
    0
    >>> x[0, 1]
    (0, 1)
    >>> x[(0, 1)]
    (0, 1)
    >>> x[()]
    ()
    >>> x[{1, 2, 3}]
    {1, 2, 3}
    >>> x["hello"]
    hello
    >>> x["hello", "hi"]
    ('hello', 'hi')

The fourth difference is that the indexing operation knows how to convert colon notations to slices, thanks to support from the parser. This is valid:

    a[1:3]

this one isn't:

    f(1:3)

The fifth difference is that there's no zero-argument form. This is valid:

    f()

this one isn't:

    a[]

Specification

Before describing the specification, it is important to stress the difference in nomenclature between positional index, final index and keyword argument, as it is important to understand the fundamental asymmetries at play. The __(get|set|del)item__ methods implement a fundamentally indexing operation: the element is retrieved, set, or deleted through an index, the final index. The current status quo is to directly build the final index from what is passed between square brackets, the positional index. In other words, what is passed in the square brackets is trivially used to generate what the code in __getitem__ then uses as the index.
As we already saw for the dict, d[1] has a positional index of 1 and also a final index of 1 (because it's the element that is then added to the dictionary), and d[1, 2] has a positional index of (1, 2) and a final index also of (1, 2) (because yet again it's the element that is added to the dictionary). However, the positional index d[1, 2:3] is not accepted by the dictionary, because there's no way to transform the positional index into a final index, as the slice object is unhashable. The positional index is what is currently known as the index parameter in __getitem__. Nevertheless, nothing prevents one from constructing a dictionary-like class that creates the final index by e.g. converting the positional index to a string.

This PEP extends the current status quo, and grants more flexibility to create the final index via an enhanced syntax that combines the positional index and keyword arguments, if passed.

The above brings an important point across. Keyword arguments, in the context of the index operation, may be used to take indexing decisions to obtain the final index, and therefore will have to accept values that are unconventional for functions. See for example use case 1, where a slice is accepted.

The successful implementation of this PEP will result in the following behavior:

1. An empty subscript is still illegal, regardless of context (see Rejected Ideas):

    obj[]  # SyntaxError

2. A single index value remains a single index value when passed:

    obj[index]          # calls type(obj).__getitem__(obj, index)
    obj[index] = value  # calls type(obj).__setitem__(obj, index, value)
    del obj[index]      # calls type(obj).__delitem__(obj, index)

This remains the case even if the index is followed by keywords; see point 5 below.

3. Comma-separated arguments are still parsed as a tuple and passed as a single positional argument:

    obj[spam, eggs]          # calls type(obj).__getitem__(obj, (spam, eggs))
    obj[spam, eggs] = value  # calls type(obj).__setitem__(obj, (spam, eggs), value)
    del obj[spam, eggs]      # calls type(obj).__delitem__(obj, (spam, eggs))

The points above mean that classes which do not want to support keyword arguments in subscripts need do nothing at all, and the feature is therefore completely backwards compatible.

4. Keyword arguments, if any, must follow positional arguments:

    obj[1, 2, spam=None, 3]  # SyntaxError

This is like function calls, where intermixing positional and keyword arguments gives a SyntaxError.

5. Keyword subscripts, if any, will be handled like they are in function calls. Examples:

    # Single index with keywords:
    obj[index, spam=1, eggs=2]          # calls type(obj).__getitem__(obj, index, spam=1, eggs=2)
    obj[index, spam=1, eggs=2] = value  # calls type(obj).__setitem__(obj, index, value, spam=1, eggs=2)
    del obj[index, spam=1, eggs=2]      # calls type(obj).__delitem__(obj, index, spam=1, eggs=2)

    # Comma-separated indices with keywords:
    obj[foo, bar, spam=1, eggs=2]          # calls type(obj).__getitem__(obj, (foo, bar), spam=1, eggs=2)
    obj[foo, bar, spam=1, eggs=2] = value  # calls type(obj).__setitem__(obj, (foo, bar), value, spam=1, eggs=2)
    del obj[foo, bar, spam=1, eggs=2]      # calls type(obj).__delitem__(obj, (foo, bar), spam=1, eggs=2)

Note that:

- a single positional index will not turn into a tuple just because one adds a keyword value.
- for __setitem__, the same order is retained for index and value. The keyword arguments go at the end, as is normal for a function definition.
6. The same rules apply with respect to keyword subscripts as for keywords in function calls:

- the interpreter matches up each keyword subscript to a named parameter in the appropriate method;
- if a named parameter is used twice, that is an error;
- if there are any named parameters left over (without a value) when the keywords are all used, they are assigned their default value (if any);
- if any such parameter doesn't have a default, that is an error;
- if there are any keyword subscripts remaining after all the named parameters are filled, and the method has a **kwargs parameter, they are bound to the **kwargs parameter as a dict;
- but if no **kwargs parameter is defined, it is an error.

7. Sequence unpacking is allowed inside subscripts:

    obj[*items]

This allows notations such as [:, *args, :], which could be treated as [(slice(None), *args, slice(None))]. Multiple star unpackings are allowed:

    obj[1, *(2, 3), *(4, 5), 6, foo=5]
    # Equivalent to obj[(1, 2, 3, 4, 5, 6), foo=5]

The following notation equivalences must be honored:

    obj[*()]         # Equivalent to obj[()]
    obj[*(), foo=3]  # Equivalent to obj[(), foo=3]
    obj[*(x,)]       # Equivalent to obj[(x,)]
    obj[*(x,),]      # Equivalent to obj[(x,)]

Note in particular the third case: sequence unpacking of a single element will not behave as if only one single argument was passed. A related case is the following example:

    obj[1, *(), foo=5]
    # Equivalent to obj[(1,), foo=5]
    # calls type(obj).__getitem__(obj, (1,), foo=5)

However, as we saw earlier, for backward compatibility a single index will be passed as is:

    obj[1, foo=5]  # calls type(obj).__getitem__(obj, 1, foo=5)

In other words, a single positional index will be passed "as is" only if no sequence unpacking is present. If a sequence unpacking is present, then the index will become a tuple, regardless of the resulting number of elements in the index after the unpacking has taken place.

8. Dict unpacking is permitted:

    items = {'spam': 1, 'eggs': 2}
    obj[index, **items]  # equivalent to obj[index, spam=1, eggs=2]

The following notation equivalences should be honored:

    obj[**{}]     # Equivalent to obj[()]
    obj[3, **{}]  # Equivalent to obj[3]

9. Keyword-only subscripts are permitted. The positional index will be the empty tuple:

    obj[spam=1, eggs=2]      # calls type(obj).__getitem__(obj, (), spam=1, eggs=2)
    obj[spam=1, eggs=2] = 5  # calls type(obj).__setitem__(obj, (), 5, spam=1, eggs=2)
    del obj[spam=1, eggs=2]  # calls type(obj).__delitem__(obj, (), spam=1, eggs=2)

The choice of the empty tuple as a sentinel has been debated. Details are provided in the Rejected Ideas section.

10. Keyword arguments must allow slice syntax:

    obj[3:4, spam=1:4, eggs=2]
    # calls type(obj).__getitem__(obj, slice(3, 4, None), spam=slice(1, 4, None), eggs=2)

This may open up the possibility to accept the same syntax for general function calls, but this is not part of this recommendation.

11. Keyword arguments allow for default values:

    # Given type(obj).__getitem__(obj, index, spam=True, eggs=2)
    obj[3]              # Valid. index = 3, spam = True, eggs = 2
    obj[3, spam=False]  # Valid. index = 3, spam = False, eggs = 2
    obj[spam=False]     # Valid. index = (), spam = False, eggs = 2
    obj[]               # Invalid.

12. The same semantics given above must be extended to __class_getitem__: since PEP 560, type hints are dispatched so that for x[y], if no __getitem__ method is found, and x is a type (class) object, and x has a class method __class_getitem__, that method is called. The same changes should be applied to this method as well, so that a writing like list[T=int] can be accepted.
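Since the proposed bracket syntax was never added to Python (the PEP was rejected), the translation rules above can still be illustrated today by calling the dunder directly with the arguments the interpreter would have generated. The Matrix class and its parameter names here are invented for the illustration:

    # A hypothetical class written against the proposed protocol. We call
    # the dunder directly, passing arguments exactly as rules 2, 3 and 9
    # say the interpreter would have generated them.
    class Matrix:
        def __getitem__(self, index, *, transpose=False):
            # index arrives as-is: a scalar for obj[i], a tuple for
            # obj[i, j], and the empty tuple () for a keyword-only subscript.
            return (index, transpose)

    m = Matrix()
    print(m.__getitem__(1))                       # m[1]    -> (1, False)
    print(m.__getitem__((1, 2)))                  # m[1, 2] -> ((1, 2), False)
    print(m.__getitem__((1, 2), transpose=True))  # m[1, 2, transpose=True]
    print(m.__getitem__((), transpose=True))      # m[transpose=True] -> ((), True)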
Indexing behavior in standard classes (dict, list, etc.)

None of what is proposed in this PEP will change the behavior of the current core classes that use indexing. Adding keywords to the index operation for custom classes is not the same as modifying e.g. the standard dict type to handle keyword arguments. In fact, dict (as well as list and other stdlib classes with indexing semantics) will remain the same and will continue not to accept keyword arguments. In other words, if d is a dict, the statement d[1, a=2] will raise TypeError, as its implementation will not support the use of keyword arguments. The same holds for all other classes (list, dict, etc.).

Corner cases and gotchas

With the introduction of the new notation, a few corner cases need to be analysed:

1. Technically, if a class defines its getter like this:

    def __getitem__(self, index):

then the caller could call that using keyword syntax, as in these two cases:

    obj[3, index=4]
    obj[index=1]

The resulting behavior would automatically be an error, since it would be like attempting to call the method with two values for the index argument, and a TypeError will be raised. In the first case, the index would be 3; in the second case, it would be the empty tuple (). Note that this behavior applies to all currently existing classes that rely on indexing, meaning that there is no way for the new behavior to introduce backward compatibility issues in this respect. Classes that wish to stress this behavior explicitly can define their parameters as positional-only:

    def __getitem__(self, index, /):

2. A similar case occurs with setter notation:

    # Given type(obj).__setitem__(obj, index, value):
    obj[1, value=3] = 5

This poses no issue because the value is passed automatically, and the Python interpreter will raise TypeError: got multiple values for keyword argument 'value'.

3. If the subscript dunders are declared to use positional-or-keyword parameters, there may be some surprising cases when arguments are passed to the method. Given the signature:

    def __getitem__(self, index, direction='north')

if the caller uses this:

    obj[0, 'south']

they will probably be surprised by the method call:

    # expected: type(obj).__getitem__(obj, 0, direction='south')
    # but actually get:
    type(obj).__getitem__(obj, (0, 'south'), direction='north')

Solution: best practice suggests that keyword subscripts should be flagged as keyword-only when possible:

    def __getitem__(self, index, *, direction='north')

The interpreter need not enforce this rule, as there could be scenarios where this is the desired behaviour. But linters may choose to warn about subscript methods which don't use the keyword-only flag. (A sketch of this gotcha follows the corner cases below.)

4. As we saw, a single value followed by a keyword argument will not be changed into a tuple, i.e. d[1, a=3] is treated as __getitem__(d, 1, a=3), NOT __getitem__(d, (1,), a=3). It would be extremely confusing if adding keyword arguments were to change the type of the passed index. In other words, adding a keyword to a single-valued subscript will not change it into a tuple. For those cases where an actual tuple needs to be passed, a proper syntax will have to be used:

    obj[(1,), a=3]  # calls type(obj).__getitem__(obj, (1,), a=3)

In this case, the call is passing a single element (which is passed as is, per the rule above), only that the single element happens to be a tuple. Note that this behavior just reveals the truth that the obj[1,] notation is shorthand for obj[(1,)] (and also obj[1] is shorthand for obj[(1)], with the expected behavior).
When keywords are present, the rule that you can omit this outermost pair of parentheses is no longer true:

    obj[1]           # calls type(obj).__getitem__(obj, 1)
    obj[1, a=3]      # calls type(obj).__getitem__(obj, 1, a=3)
    obj[1,]          # calls type(obj).__getitem__(obj, (1,))
    obj[(1,), a=3]   # calls type(obj).__getitem__(obj, (1,), a=3)

This is particularly relevant in the case where two entries are passed:

    obj[1, 2]         # calls type(obj).__getitem__(obj, (1, 2))
    obj[(1, 2)]       # same as above
    obj[1, 2, a=3]    # calls type(obj).__getitem__(obj, (1, 2), a=3)
    obj[(1, 2), a=3]  # calls type(obj).__getitem__(obj, (1, 2), a=3)

And particularly when the tuple is extracted as a variable:

    t = (1, 2)
    obj[t]       # calls type(obj).__getitem__(obj, (1, 2))
    obj[t, a=3]  # calls type(obj).__getitem__(obj, (1, 2), a=3)

Why? Because in the case obj[1, 2, a=3] we are passing two elements (which are then packed as a tuple and passed as the index). In the case obj[(1, 2), a=3] we are passing a single element (which is passed as is) which happens to be a tuple. The final result is that they are the same.
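The keyword-only best practice from corner case 3 can likewise be sketched by simulating the interpreter's calls directly; the Map and SafeMap classes are hypothetical:

    # Illustrating corner case 3. The proposed bracket syntax does not
    # exist, so we simulate the calls the interpreter would make.
    class Map:
        def __getitem__(self, index, direction='north'):    # positional-or-keyword
            return (index, direction)

    class SafeMap:
        def __getitem__(self, index, *, direction='north'): # keyword-only
            return (index, direction)

    m = Map()
    # Simulates m[0, 'south']: both values are packed into the positional
    # index, and 'direction' silently keeps its default -- the surprise
    # described above.
    print(m.__getitem__((0, 'south')))          # ((0, 'south'), 'north')

    s = SafeMap()
    # With a keyword-only parameter, the intent must be spelled out.
    print(s.__getitem__(0, direction='south'))  # (0, 'south')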
C Interface

Resolution of the indexing operation is performed through a call to the following functions:

- PyObject_GetItem(PyObject *o, PyObject *key) for the get operation
- PyObject_SetItem(PyObject *o, PyObject *key, PyObject *value) for the set operation
- PyObject_DelItem(PyObject *o, PyObject *key) for the del operation

These functions are used extensively within the Python executable, and are also part of the public C API, as exported by Include/abstract.h. It is clear that the signatures of these functions cannot be changed, and different C-level functions need to be implemented to support the extended call. We propose:

- PyObject_GetItemWithKeywords(PyObject *o, PyObject *key, PyObject *kwargs)
- PyObject_SetItemWithKeywords(PyObject *o, PyObject *key, PyObject *value, PyObject *kwargs)
- PyObject_DelItemWithKeywords(PyObject *o, PyObject *key, PyObject *kwargs)

New opcodes will be needed for the enhanced call. Currently, the implementation uses BINARY_SUBSCR, STORE_SUBSCR and DELETE_SUBSCR to invoke the old functions. We propose BINARY_SUBSCR_KW, STORE_SUBSCR_KW and DELETE_SUBSCR_KW for the new operations. The compiler will have to generate these new opcodes. The old C implementations will call the extended methods passing NULL as kwargs.

Finally, the following new slots must be added to the PyMappingMethods struct:

- mp_subscript_kw
- mp_ass_subscript_kw

These slots will have the appropriate signature to handle the dictionary object containing the keywords.

"How to teach" recommendations

One request that occurred during feedback sessions was to detail a possible narrative for teaching the feature, e.g. to students, data scientists, and similar audiences. This section addresses that need. We will only describe indexing from the perspective of use, not of implementation, because that is the aspect the above-mentioned audiences will most likely encounter. Only a subset of users will have to implement their own dunder functions, which can be considered advanced usage. A proper explanation could be:

    The indexing operation is generally used to refer to a subset of a
    larger dataset by means of an index. In the commonly seen cases, the
    index is made of one or more numbers, strings, slices, etc. Some types
    may allow indexing to occur not only with the index, but also with
    named values. These named values are given between square brackets
    using the same syntax used for function call keyword arguments. The
    meaning of the names and their use is found in the documentation of
    the type, as it varies from one type to another.

The teacher will now show some practical real-world examples, explaining the semantics of the feature in the shown library. At the time of writing these examples do not exist, obviously, but the libraries most likely to implement the feature are pandas and numpy, possibly as a method to refer to columns by name.

Reference Implementation

A reference implementation is currently being developed here[5].

Workarounds

Every PEP that changes the Python language should "clearly explain why the existing language specification is inadequate to address the problem that the PEP solves". Some rough equivalents to the proposed extension, which we call workarounds, are already possible. The workarounds provide an alternative to enabling the new syntax, while leaving the semantics to be defined elsewhere. These workarounds follow. In them, the helpers H and P are not intended to be universal; for example, a module or package might require the use of its own helpers.

1. User-defined classes can be given getitem and delitem methods, that respectively get and delete values stored in a container:

    >>> val = x.getitem(1, 2, a=3, b=4)
    >>> x.delitem(1, 2, a=3, b=4)

The same can't be done for setitem. It's not valid syntax:

    >>> x.setitem(1, 2, a=3, b=4) = val
    SyntaxError: can't assign to function call

2. A helper class, here called H, can be used to swap the container and parameter roles. In other words, we use:

    H(1, 2, a=3, b=4)[x]

as a substitute for:

    x[1, 2, a=3, b=4]

This method will work for getitem, delitem and also for setitem. This is because:

    >>> H(1, 2, a=3, b=4)[x] = val

is valid syntax, which can be given the appropriate semantics.

3. A helper function, here called P, can be used to store the arguments in a single object. For example:

    >>> x[P(1, 2, a=3, b=4)] = val

is valid syntax, and can be given the appropriate semantics.

4. The lo:hi:step syntax for slices is sometimes very useful. This syntax is not directly available in the workarounds. However:

    s[lo:hi:step]

provides a workaround that is available everywhere, where:

    class S:
        def __getitem__(self, key):
            return key

    s = S()

defines the helper object s.
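A minimal sketch of what the helpers H and P could look like follows. The PEP leaves their semantics open; this version assumes (our assumption, not the PEP's) that the target container exposes getitem/setitem methods, and the Store class is invented for the demonstration:

    class H:
        """Workaround 2: the helper holds the arguments; the container
        appears on the right of the brackets."""
        def __init__(self, *args, **kwargs):
            self.args, self.kwargs = args, kwargs

        def __getitem__(self, container):
            return container.getitem(*self.args, **self.kwargs)

        def __setitem__(self, container, value):
            container.setitem(value, *self.args, **self.kwargs)

    class P:
        """Workaround 3: bundle positional and keyword subscript
        arguments into a single key object."""
        def __init__(self, *args, **kwargs):
            self.args, self.kwargs = args, kwargs

    class Store:
        """A toy container keyed on (args, sorted kwargs) pairs."""
        def __init__(self):
            self._data = {}

        def _key(self, args, kwargs):
            return (args, tuple(sorted(kwargs.items())))

        def setitem(self, value, *args, **kwargs):
            self._data[self._key(args, kwargs)] = value

        def getitem(self, *args, **kwargs):
            return self._data[self._key(args, kwargs)]

        def __setitem__(self, key, value):   # x[P(...)] = val
            if not isinstance(key, P):
                raise TypeError("expected a P key")
            self.setitem(value, *key.args, **key.kwargs)

    x = Store()
    H(1, 2, a=3)[x] = "spam"     # workaround 2: helper on the left
    print(H(1, 2, a=3)[x])       # spam
    x[P(1, 2, b=4)] = "eggs"     # workaround 3: single bundled key
    print(x.getitem(1, 2, b=4))  # eggs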
Rejected Ideas

Previous PEP 472 solutions

PEP 472 presents a good number of ideas that are now all to be considered rejected. A personal email from D'Aprano to the author specifically said:

    I have now carefully read through PEP 472 in full, and I am afraid I
    cannot support any of the strategies currently in the PEP.

We agree that those options are inferior to the one currently presented, for one reason or another. To keep this document compact, we will not present here the objections to all of the options presented in PEP 472. Suffice to say that they were discussed, and each proposed alternative had one or a few dealbreakers.

Adding new dunders

It was proposed to introduce new dunders __(get|set|del)item_ex__ that are invoked in preference to the __(get|set|del)item__ triad, if they are present. The rationale behind this choice is to make the intuition around how to add keyword argument support to square brackets more obvious and in line with function behavior. Given:

    def __getitem_ex__(self, x, y):
        ...

these all just work and produce the same result effortlessly:

    obj[1, 2]
    obj[1, y=2]
    obj[y=2, x=1]

In other words, this solution would unify the behavior of __getitem__ with the traditional function signature, but since we can't change __getitem__ without breaking backward compatibility, we would have an extended version that is used preferentially.

The problems with this approach were found to be:

- It will slow down subscripting. For every subscript access, this new dunder attribute gets investigated on the class, and if it is not present then the default key translation function is executed. Different ideas were proposed to handle this, from wrapping the method only at class instantiation time, to adding a bit flag to signal the availability of these methods. Regardless of the solution, the new dunder would be effective only if added at class creation time, not if it's added later. This would be unusual, and would disallow (and behave unexpectedly under) monkeypatching of the methods, for whatever reason it might be needed.
- It adds complexity to the mechanism.
- It will require a long and painful transition period during which libraries will have to somehow support both calling conventions, because most likely the extended methods will delegate to the traditional ones when the right conditions are matched in the arguments, or some classes will support the traditional dunder and others the extended dunder. While this will not affect calling code, it will affect development.
- It would potentially lead to mixed situations where the extended version is defined for the getter, but not for the setter.
- In the __setitem_ex__ signature, value would have to be made the first element, because the index is of arbitrary length depending on the specified indexes. This would look awkward because the visual notation does not match the signature:

    obj[1, 2] = 3  # calls type(obj).__setitem_ex__(obj, 3, 1, 2)

- The solution relies on the assumption that all keyword indices necessarily map onto positional indices, or that they must have a name. This assumption may be false: xarray, which is the primary Python package for numpy arrays with labelled dimensions, supports indexing by additional dimensions (so-called "non-dimension coordinates") that don't correspond directly to the dimensions of the underlying numpy array, and those have no position to match up to. In other words, anonymous indexes are a plausible use case that this solution would remove, although it could be argued that using *args would solve that issue.

Adding an adapter function

Similar to the above, in the sense that a pre-function would be called to convert the "new style" indexing into the "old style" indexing that is then passed on. This has problems similar to the above.

Create a new "kwslice" object

This proposal has already been explored in "New arguments contents" P4 in PEP 472:

    obj[a, b:c, x=1]
    # calls type(obj).__getitem__(obj, a, slice(b, c), key(x=1))

This solution requires everyone who needs keyword arguments to parse the tuple and/or key object by hand to extract them. This is painful, and opens up the get/set/del functions to always accepting arbitrary keyword arguments, whether they make sense or not. We want the developer to be able to specify which arguments make sense and which ones do not.
Using a single bit to change the behavior

A special class dunder flag:

    __keyfn__ = True

would change the signature of the __(get|set|del)item__ methods to a "function-like" dispatch, meaning that this:

    >>> d[1, 2, z=3]

would result in a call to:

    >>> type(obj).__getitem__(obj, 1, 2, z=3)
    # instead of type(obj).__getitem__(obj, (1, 2), z=3)

This option has been rejected because it feels odd that the signature of a method depends on a specific value of another dunder. It would be confusing for both static type checkers and for humans: a static type checker would have to hard-code a special case for this, because there really is nothing else in Python where the signature of a dunder depends on the value of another dunder. A human who has to implement a __getitem__ dunder would have to look in the class (or in any of its base classes) for a __keyfn__ before the dunder can be written. Moreover, adding a base class that has the __keyfn__ flag set would break the signature of the current methods. This would be even more problematic if the flag is changed at runtime, or if the flag is generated by calling a function that randomly returns True or something else.

Allowing for empty index notation obj[]

The current proposal prevents obj[] from being valid notation. However, a commenter stated:

    We have Tuple[int, int] as a tuple of two integers. And we have
    Tuple[int] as a tuple of one integer. And occasionally we need to spell
    a tuple of no values, since that's the type of (). But we currently are
    forced to write that as Tuple[()]. If we allowed Tuple[] that odd edge
    case would be removed.

    So I probably would be okay with allowing obj[] syntactically, as long
    as the dict type could be made to reject it.

This proposal has already established that, in case no positional index is given, the passed value must be the empty tuple. Allowing for the empty index notation would make the dictionary type accept it automatically, to insert or refer to the value with the empty tuple as key. Moreover, a typing notation such as Tuple[] can easily be written as Tuple without the indexing notation.

However, subsequent discussion with Brandt Bucher during implementation revealed that the case obj[] would fit a natural evolution for variadic generics, giving more strength to the above comment. In the end, after a discussion between D'Aprano, Bucher and the author, we decided to leave the obj[] notation as a syntax error for now, and possibly extend the notation with an additional PEP to hold the equivalence of obj[] to obj[()].

Sentinel value for no given positional index

The topic of which value to pass as the index in the case of:

    obj[k=3]

has been considerably debated. One apparently rational choice would be to pass no value at all, by making use of the keyword-only argument feature, but unfortunately this will not work well with the __setitem__ dunder, as a positional element for the value is always passed, and we can't "skip over" the index unless we introduce a very weird behavior where the first argument refers to the index when specified, and to the value when the index is not specified. This is extremely deceiving and error-prone.
The above consideration makes it impossible to have a keyword-only dunder, and opens up the question of what entity to pass for the index position when no index is passed:

    obj[k=3] = 5  # would call type(obj).__setitem__(obj, ???, 5, k=3)

A proposed hack would be to let the user specify which entity to use when an index is not specified, by specifying a default for the index, but this necessarily forces one to also specify a (never going to be used, as a value is always passed by design) default for the value, as we can't have non-default arguments after defaulted ones:

    def __setitem__(self, index=SENTINEL, value=NEVERUSED, *, k)

which seems ugly, redundant and confusing. We must therefore accept that some form of sentinel index must be passed by the Python implementation when the obj[k=3] notation is used. This also means that default arguments for those parameters are simply never going to be used (but that's already the case with the current implementation, so no change there).

Additionally, some classes may want to use **kwargs instead of a keyword-only argument, meaning that with a definition like:

    def __setitem__(self, index, value, **kwargs):

a user who wants to pass a keyword value:

    x[value=1] = 0

expecting a call like:

    type(obj).__setitem__(obj, SENTINEL, 0, **{"value": 1})

will instead accidentally have it caught by the named value parameter, producing a duplicate-value error. The user should not have to worry about the actual local names of those two arguments if they are, for all practical purposes, positional-only. Using positional-only parameters will ensure this does not happen, but it still does not remove the need to pass both index and value even when the index is not provided. The point is that the user should not be prevented from using keyword arguments to refer to a column named index, value (or self) just because the class implementor happens to use those names in the parameter list.

Moreover, we also require the three dunders to behave in the same way: it would be extremely inconvenient if only __setitem__ were to receive this sentinel, and __get|delitem__ did not, just because they can get away with a signature that allows for no index specification, thus allowing for a user-specified default index.

Whatever the choice of the sentinel, it will make the following cases degenerate and thus impossible to differentiate in the dunder:

    obj[k=3]
    obj[SENTINEL, k=3]

The question now shifts to which entity should represent the sentinel. The options were:

1. Empty tuple
2. None
3. NotImplemented
4. A new sentinel object (e.g. NoIndex)

For option 1, the call will become:

    type(obj).__getitem__(obj, (), k=3)

therefore making obj[k=3] and obj[(), k=3] degenerate and indistinguishable. This option sounds appealing because:

1. The numpy community was consulted[6], and the general consensus of the responses was that the empty tuple felt appropriate.

2. It shows a parallel with the behavior of *args in a function, when no positional arguments are given:

    >>> def foo(*args, **kwargs):
    ...     print(args, kwargs)
    ...
    >>> foo(k=3)
    () {'k': 3}

although we do accept the following asymmetry in behavior compared to functions when a single value is passed, but that ship has sailed:

    >>> foo(5, k=3)
    (5,) {'k': 3}
    # for indexing, a plain 5, not a 1-tuple, is passed

For option 2, using None, it was objected that NumPy uses it to indicate inserting a new axis/dimension (there's a np.newaxis alias as well):

    arr = np.array(5)
    arr.ndim == 0
    arr[None].ndim == arr[None,].ndim == 1

While this is not an insurmountable issue, it certainly will ripple onto numpy.

The only issue with both of the above is that both the empty tuple and None are potentially legitimate indexes, and there might be value in being able to differentiate the two degenerate cases.

So, an alternative strategy (option 3) would be to use an existing entity that is unlikely to be used as a valid index. One option could be the current built-in constant NotImplemented, which is currently returned by operator methods to report that they do not implement a particular operation, and that a different strategy should be attempted (e.g. to ask the other object). Unfortunately, its name and traditional use call back to a feature that is not available, rather than to the fact that something was not passed by the user.

This leaves us with option 4: a new built-in constant. This constant would have to be unhashable (so it's never going to be a valid key) and have a clear name that makes its context obvious: NoIndex. This would solve all the above issues, but the question is: is it worth it? From a quick inquiry, it seems that most people on python-ideas believe it's not crucial, and the empty tuple is an acceptable option. Hence the resulting series will be:

    obj[k=3]        # type(obj).__getitem__(obj, (), k=3). Empty tuple
    obj[1, k=3]     # type(obj).__getitem__(obj, 1, k=3). Integer
    obj[1, 2, k=3]  # type(obj).__getitem__(obj, (1, 2), k=3). Tuple

and the following two notations will be degenerate:

    obj[(), k=3]  # type(obj).__getitem__(obj, (), k=3)
    obj[k=3]      # type(obj).__getitem__(obj, (), k=3)

Common objections

1. Just use a method call. One of the use cases is typing, where the indexing operation is used exclusively, and function calls are out of the question. Moreover, function calls do not handle slice notation, which is commonly used in some cases for arrays. A further problem is that type hint creation has been extended to built-ins in Python 3.9, so that you do not have to import Dict, List, et al. anymore. Without keyword arguments inside [], you would not be able to do this:

    Vector = dict[i=float, j=float]

but for obvious reasons, call syntax using builtins to create custom type hints isn't an option:

    dict(i=float, j=float)  # would create a dictionary, not a type

Finally, function calls do not allow for a setitem-like notation, as shown in the Overview: operations such as f(1, x=3) = 5 are not allowed, and are instead allowed for indexing operations.

References

Copyright

This document has been placed in the public domain.
[1] "Rejection of PEP 472" (https://mail.python.org/pipermail/python-dev/2019-March/156693.html)

[2] "Allow kwargs in __{getdel}item__" (https://mail.python.org/archives/list/[email protected]/thread/EUGDRTRFIY36K4RM3QRR52CKCI7MIR2M/)

[3] "PEP 472 -- Support for indexing with keyword arguments" (https://mail.python.org/archives/list/[email protected]/thread/6OGAFDWCXT5QVV23OZWKBY4TXGZBVYZS/)

[4] "trio.run() should take **kwargs in addition to *args" (https://github.com/python-trio/trio/issues/470)

[5] "Reference implementation" (https://github.com/python/cpython/compare/master...stefanoborini:PEP-637-implementation-attempt-2)

[6] "[Numpy-discussion] Request for comments on PEP 637 - Support for indexing with keyword arguments" (http://numpy-discussion.10968.n7.nabble.com/Request-for-comments-on-PEP-637-Support-for-indexing-with-keyword-arguments-td48489.html)
PEP: 329
Title: Treating Builtins as Constants in the Standard Library
Version: $Revision$
Last-Modified: $Date$
Author: Raymond Hettinger <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Apr-2004
Python-Version: 2.4
Post-History: 18-Apr-2004

Abstract

The proposal is to add a function for treating builtin references as constants and to apply that function throughout the standard library.

Status

The PEP is self-rejected by the author. Though the ASPN recipe was well received, there was less willingness to consider this for inclusion in the core distribution. The Jython implementation does not use byte codes, so its performance would suffer if the current _len=len optimizations were removed. Also, altering byte codes is one of the least clean ways to improve performance and enable cleaner coding. A more robust solution would likely involve compiler pragma directives or metavariables indicating what can be optimized (similar to const/volatile declarations).

Motivation

The library contains code such as _len=len which is intended to create fast local references instead of slower global lookups. Though necessary for performance, these constructs clutter the code and are usually incomplete (missing many opportunities). If the proposal is adopted, those constructs could be eliminated from the code base, and at the same time the results would improve in terms of performance.

There are currently over a hundred instances of while 1 in the library. They were not replaced with the more readable while True because of performance reasons (the compiler cannot eliminate the test because True is not known to always be a constant). Conversion of True to a constant will clarify the code while retaining performance.

Many other basic Python operations run much slower because of global lookups. In try/except statements, the trapped exceptions are dynamically looked up before testing whether they match. Similarly, simple identity tests such as while x is not None require the None variable to be re-looked-up on every pass. Builtin lookups are especially egregious because the enclosing global scope must be checked first. These lookup chains devour cache space that is best used elsewhere.

In short, if the proposal is adopted, the code will become cleaner and performance will improve across the board.

Proposal

Add a module called codetweaks.py which contains two functions, bind_constants() and bind_all(). The first function performs constant binding and the second recursively applies it to every function and class in a target module.

For most modules in the standard library, add a pair of lines near the end of the script:

    import codetweaks, sys
    codetweaks.bind_all(sys.modules[__name__])

In addition to binding builtins, there are some modules (like sre_compile) where it also makes sense to bind module variables as well as builtins into constants.
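To see the global lookups this proposal targets, one can inspect a function's bytecode with the dis module. This demo is not part of the PEP, and uses modern Python 3 for convenience, while the PEP itself targets Python 2.4:

    import dis

    def total_length(lines):
        total = 0
        for line in lines:
            total += len(line)  # 'len' is re-looked-up as a global on every pass
        return total

    # The disassembly shows a LOAD_GLOBAL for 'len' inside the loop; PEP 329
    # would rewrite that instruction to LOAD_CONST once the builtin's value
    # is bound into the constants table.
    dis.dis(total_length)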
When a module is imported for the first time, Python compiles the bytecode and runs the binding optimization. Subsequent imports just re-use the previous work. Each session repeats this process (the results are not saved in pyc files).

4. How do you know this works?

I implemented it, applied it to every module in the library, and the test suite ran without exception.

5. What if the module defines a variable shadowing a builtin?

This does happen. For instance, True can be redefined at the module level as True = (1==1). The sample implementation below detects the shadowing and leaves the global lookup unchanged.

6. Are you the first person to recognize that most global lookups are for values that never change?

No, this has long been known. Skip Montanaro provides an eloquent explanation in PEP 266.

7. What if I want to replace the builtins module and supply my own implementations?

Either do this before importing a module, or just reload the module, or disable codetweaks.py (it will have a disable flag).

8. How susceptible is this module to changes in Python's byte coding?

It imports opcode.py to protect against renumbering. Also, it uses LOAD_CONST and LOAD_GLOBAL, which are fundamental and have been around forever. That notwithstanding, the coding scheme could change and this implementation would have to change along with modules like dis, which also rely on the current coding scheme.

9. What is the effect on startup time?

I could not measure a difference. None of the startup modules are bound except for warnings.py. Also, the binding function is very fast, making just a single pass over the code string in search of the LOAD_GLOBAL opcode.
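On a modern CPython, the difference between the two lookup styles can be seen with the dis module (a small illustrative sketch, not from the original PEP; exact opcode listings vary across versions):

    import dis

    def lookup():
        return len("abc")       # len is fetched with LOAD_GLOBAL on every call

    def prebound(len=len):      # the _len=len style idiom this PEP aims to eliminate
        return len("abc")       # len is now a fast local lookup

    dis.dis(lookup)    # the disassembly shows a LOAD_GLOBAL for len
    dis.dis(prebound)  # the disassembly shows LOAD_FAST instead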
""" import __builtin__ env = vars(__builtin__).copy() stoplist = dict.fromkeys(stoplist) if builtin_only: stoplist.update(f.func_globals) else: env.update(f.func_globals) co = f.func_code newcode = map(ord, co.co_code) newconsts = list(co.co_consts) codelen = len(newcode) i = 0 while i < codelen: opcode = newcode[i] if opcode in ABORT_CODES: return f # for simplicity, only optimize common cases if opcode == LOAD_GLOBAL: oparg = newcode[i+1] + (newcode[i+2] << 8) name = co.co_names[oparg] if name in env and name not in stoplist: value = env[name] try: pos = newconsts.index(value) except ValueError: pos = len(newconsts) newconsts.append(value) newcode[i] = LOAD_CONST newcode[i+1] = pos & 0xFF newcode[i+2] = pos >> 8 if verbose: print name, '-->', value i += 1 if opcode >= HAVE_ARGUMENT: i += 2 codestr = ''.join(map(chr, newcode)) codeobj = type(co)(co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, codestr, tuple(newconsts), co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_freevars, co.co_cellvars) return type(f)(codeobj, f.func_globals, f.func_name, f.func_defaults, f.func_closure) def bind_all(mc, builtin_only=False, stoplist=[], verbose=False): """Recursively apply bind_constants() to functions in a module or class. Use as the last line of the module (after everything is defined, but before test code). In modules that need modifiable globals, set builtin_only to True. """ for k, v in vars(mc).items(): if type(v) is FunctionType: newv = bind_constants(v, builtin_only, stoplist, verbose) setattr(mc, k, newv) elif type(v) in (type, ClassType): bind_all(v, builtin_only, stoplist, verbose) def f(): pass try: f.func_code.code except AttributeError: # detect non-CPython environments bind_all = lambda *args, **kwds: 0 del f import sys bind_all(sys.modules[__name__]) # Optimizer, optimize thyself! Note the automatic detection of a non-CPython environment that does not have bytecodes[1]. In that situation, the bind functions would simply return the original function unchanged. This assures that the two line additions to library modules do not impact other implementations. The final code should add a flag to make it easy to disable binding. References [1] ASPN Recipe for a non-private implementation https://code.activestate.com/recipes/277940/ Copyright This document has been placed in the public domain. [1] Differences between CPython and Jython https://web.archive.org/web/20031018014238/http://www.jython.org/cgi-bin/faqw.py?req=show&file=faq01.003.htp
python-peps
2024-10-18T13:23:31.653776
2004-04-18T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0329/", "authors": [ "Raymond Hettinger" ], "pep_number": "0329", "pandoc_version": "3.5" }
0489
PEP: 489 Title: Multi-phase extension module initialization Version: $Revision$ Last-Modified: $Date$ Author: Petr Viktorin <[email protected]>, Stefan Behnel <[email protected]>, Alyssa Coghlan <[email protected]> BDFL-Delegate: Eric Snow <[email protected]> Discussions-To: [email protected] Status: Final Type: Standards Track Content-Type: text/x-rst Created: 11-Aug-2013 Python-Version: 3.5 Post-History: 23-Aug-2013, 20-Feb-2015, 16-Apr-2015, 07-May-2015, 18-May-2015 Resolution: https://mail.python.org/pipermail/python-dev/2015-May/140108.html

Abstract

This PEP proposes a redesign of the way in which built-in and extension modules interact with the import machinery. This was last revised for Python 3.0 in PEP 3121, but did not solve all problems at the time. The goal is to solve import-related problems by bringing extension modules closer to the way Python modules behave; specifically to hook into the ModuleSpec-based loading mechanism introduced in PEP 451.

This proposal draws inspiration from PyType_Spec of PEP 384 to allow extension authors to only define features they need, and to allow future additions to extension module declarations.

Extension modules are created in a two-step process, fitting better into the ModuleSpec architecture, with parallels to __new__ and __init__ of classes.

Extension modules can safely store arbitrary C-level per-module state in the module that is covered by normal garbage collection and supports reloading and sub-interpreters. Extension authors are encouraged to take these issues into account when using the new API.

The proposal also allows extension modules with non-ASCII names.

Not all problems tackled in PEP 3121 are solved in this proposal. In particular, problems with run-time module lookup (PyState_FindModule) are left to a future PEP.

Motivation

Python modules and extension modules are not being set up in the same way. For Python modules, the module object is created and set up first, then the module code is executed (PEP 302). A ModuleSpec object (PEP 451) is used to hold information about the module, and passed to the relevant hooks.

For extensions (i.e. shared libraries) and built-in modules, the module init function is executed straight away and does both the creation and initialization. The initialization function is not passed the ModuleSpec, or any information it contains, such as the __file__ or fully-qualified name. This hinders relative imports and resource loading.

In Py3, modules are also not added to sys.modules, which means that a (potentially transitive) re-import of the module will really try to re-import it and thus run into an infinite loop when it executes the module init function again. Without access to the fully-qualified module name, it is not trivial to correctly add the module to sys.modules either. This is specifically a problem for Cython generated modules, for which it's not uncommon that the module init code has the same level of complexity as that of any 'regular' Python module. Also, the lack of __file__ and __name__ information hinders the compilation of "__init__.py" modules, i.e. packages, especially when relative imports are being used at module init time.

Furthermore, the majority of currently existing extension modules have problems with sub-interpreter support and/or interpreter reloading, and, while it is possible with the current infrastructure to support these features, it is neither easy nor efficient.
Addressing these issues was the goal of PEP 3121, but many extensions, including some in the standard library, took the least-effort approach to porting to Python 3, leaving these issues unresolved. This PEP keeps backwards compatibility, which should reduce pressure and give extension authors adequate time to consider these issues when porting. The current process Currently, extension and built-in modules export an initialization function named "PyInit_modulename", named after the file name of the shared library. This function is executed by the import machinery and must return a fully initialized module object. The function receives no arguments, so it has no way of knowing about its import context. During its execution, the module init function creates a module object based on a PyModuleDef object. It then continues to initialize it by adding attributes to the module dict, creating types, etc. In the back, the shared library loader keeps a note of the fully qualified module name of the last module that it loaded, and when a module gets created that has a matching name, this global variable is used to determine the fully qualified name of the module object. This is not entirely safe as it relies on the module init function creating its own module object first, but this assumption usually holds in practice. The proposal The initialization function (PyInit_modulename) will be allowed to return a pointer to a PyModuleDef object. The import machinery will be in charge of constructing the module object, calling hooks provided in the PyModuleDef in the relevant phases of initialization (as described below). This multi-phase initialization is an additional possibility. Single-phase initialization, the current practice of returning a fully initialized module object, will still be accepted, so existing code will work unchanged, including binary compatibility. The PyModuleDef structure will be changed to contain a list of slots, similarly to PEP 384's PyType_Spec for types. To keep binary compatibility, and avoid needing to introduce a new structure (which would introduce additional supporting functions and per-module storage), the currently unused m_reload pointer of PyModuleDef will be changed to hold the slots. The structures are defined as: typedef struct { int slot; void *value; } PyModuleDef_Slot; typedef struct PyModuleDef { PyModuleDef_Base m_base; const char* m_name; const char* m_doc; Py_ssize_t m_size; PyMethodDef *m_methods; PyModuleDef_Slot *m_slots; /* changed from `inquiry m_reload;` */ traverseproc m_traverse; inquiry m_clear; freefunc m_free; } PyModuleDef; The m_slots member must be either NULL, or point to an array of PyModuleDef_Slot structures, terminated by a slot with id set to 0 (i.e. {0, NULL}). To specify a slot, a unique slot ID must be provided. New Python versions may introduce new slot IDs, but slot IDs will never be recycled. Slots may get deprecated, but will continue to be supported throughout Python 3.x. A slot's value pointer may not be NULL, unless specified otherwise in the slot's documentation. The following slots are currently available, and described later: - Py_mod_create - Py_mod_exec Unknown slot IDs will cause the import to fail with SystemError. When using multi-phase initialization, the m_name field of PyModuleDef will not be used during importing; the module name will be taken from the ModuleSpec. Before it is returned from PyInit*, the PyModuleDef object must be initialized using the newly added PyModuleDef_Init function. 
This sets the object type (which cannot be done statically on certain compilers), refcount, and internal bookkeeping data (m_index). For example, an extension module "example" would be exported as: static PyModuleDef example_def = {...} PyMODINIT_FUNC PyInit_example(void) { return PyModuleDef_Init(&example_def); } The PyModuleDef object must be available for the lifetime of the module created from it – usually, it will be declared statically. Pseudo-code Overview Here is an overview of how the modified importers will operate. Details such as logging or handling of errors and invalid states are left out, and C code is presented with a concise Python-like syntax. The framework that calls the importers is explained in 451#how-loading-will-work. importlib/_bootstrap.py: class BuiltinImporter: def create_module(self, spec): module = _imp.create_builtin(spec) def exec_module(self, module): _imp.exec_dynamic(module) def load_module(self, name): # use a backwards compatibility shim _load_module_shim(self, name) importlib/_bootstrap_external.py: class ExtensionFileLoader: def create_module(self, spec): module = _imp.create_dynamic(spec) def exec_module(self, module): _imp.exec_dynamic(module) def load_module(self, name): # use a backwards compatibility shim _load_module_shim(self, name) Python/import.c (the _imp module): def create_dynamic(spec): name = spec.name path = spec.origin # Find an already loaded module that used single-phase init. # For multi-phase initialization, mod is NULL, so a new module # is always created. mod = _PyImport_FindExtensionObject(name, name) if mod: return mod return _PyImport_LoadDynamicModuleWithSpec(spec) def exec_dynamic(module): if not isinstance(module, types.ModuleType): # non-modules are skipped -- PyModule_GetDef fails on them return def = PyModule_GetDef(module) state = PyModule_GetState(module) if state is NULL: PyModule_ExecDef(module, def) def create_builtin(spec): name = spec.name # Find an already loaded module that used single-phase init. # For multi-phase initialization, mod is NULL, so a new module # is always created. mod = _PyImport_FindExtensionObject(name, name) if mod: return mod for initname, initfunc in PyImport_Inittab: if name == initname: m = initfunc() if isinstance(m, PyModuleDef): def = m return PyModule_FromDefAndSpec(def, spec) else: # fall back to single-phase initialization module = m _PyImport_FixupExtensionObject(module, name, name) return module Python/importdl.c: def _PyImport_LoadDynamicModuleWithSpec(spec): path = spec.origin package, dot, name = spec.name.rpartition('.') # see the "Non-ASCII module names" section for export_hook_name hook_name = export_hook_name(name) # call platform-specific function for loading exported function # from shared library exportfunc = _find_shared_funcptr(hook_name, path) m = exportfunc() if isinstance(m, PyModuleDef): def = m return PyModule_FromDefAndSpec(def, spec) module = m # fall back to single-phase initialization .... 
Objects/moduleobject.c:

    def PyModule_FromDefAndSpec(def, spec):
        name = spec.name
        create = None
        for slot, value in def.m_slots:
            if slot == Py_mod_create:
                create = value
        if create:
            m = create(spec, def)
        else:
            m = PyModule_New(name)
        if isinstance(m, types.ModuleType):
            m.md_state = None
            m.md_def = def
        if def.m_methods:
            PyModule_AddFunctions(m, def.m_methods)
        if def.m_doc:
            PyModule_SetDocString(m, def.m_doc)

    def PyModule_ExecDef(module, def):
        if isinstance(module, types.module_type):
            if module.md_state is NULL:
                # allocate a block of zeroed-out memory
                module.md_state = _alloc(module.md_size)
        if def.m_slots is NULL:
            return
        for slot, value in def.m_slots:
            if slot == Py_mod_exec:
                value(module)

Module Creation Phase

Creation of the module object – that is, the implementation of ExecutionLoader.create_module – is governed by the Py_mod_create slot.

The Py_mod_create slot

The Py_mod_create slot is used to support custom module subclasses. The value pointer must point to a function with the following signature:

    PyObject* (*PyModuleCreateFunction)(PyObject *spec, PyModuleDef *def)

The function receives a ModuleSpec instance, as defined in PEP 451, and the PyModuleDef structure. It should return a new module object, or set an error and return NULL.

This function is not responsible for setting import-related attributes specified in PEP 451 (such as __name__ or __loader__) on the new module.

There is no requirement for the returned object to be an instance of types.ModuleType. Any type can be used, as long as it supports setting and getting attributes, including at least the import-related attributes. However, only ModuleType instances support module-specific functionality such as per-module state and processing of execution slots. If something other than a ModuleType subclass is returned, no execution slots may be defined; if any are, a SystemError is raised.

Note that when this function is called, the module's entry in sys.modules is not populated yet. Attempting to import the same module again (possibly transitively) may lead to an infinite loop. Extension authors are advised to keep Py_mod_create minimal, and in particular not to call user code from it.

Multiple Py_mod_create slots may not be specified. If they are, import will fail with SystemError.

If Py_mod_create is not specified, the import machinery will create a normal module object using PyModule_New. The name is taken from spec.

Post-creation steps

If the Py_mod_create function returns an instance of types.ModuleType or a subclass (or if a Py_mod_create slot is not present), the import machinery will associate the PyModuleDef with the module. This also makes the PyModuleDef accessible to the execution phase, the PyModule_GetDef function, and garbage collection routines (traverse, clear, free).

If the Py_mod_create function does not return a module subclass, then m_size must be 0, and m_traverse, m_clear and m_free must all be NULL. Otherwise, SystemError is raised.

Additionally, initial attributes specified in the PyModuleDef are set on the module object, regardless of its type:

- The docstring is set from m_doc, if non-NULL.
- The module's functions are initialized from m_methods, if any.

Module Execution Phase

Module execution -- that is, the implementation of ExecutionLoader.exec_module -- is governed by "execution slots". This PEP only adds one, Py_mod_exec, but others may be added in the future.

The execution phase is done on the PyModuleDef associated with the module object.
For objects that are not a subclass of PyModule_Type (for which PyModule_GetDef would fail), the execution phase is skipped. Execution slots may be specified multiple times, and are processed in the order they appear in the slots array. When using the default import machinery, they are processed after import-related attributes specified in 451#attributes (such as __name__ or __loader__) are set and the module is added to sys.modules. Pre-Execution steps Before processing the execution slots, per-module state is allocated for the module. From this point on, per-module state is accessible through PyModule_GetState. The Py_mod_exec slot The entry in this slot must point to a function with the following signature: int (*PyModuleExecFunction)(PyObject* module) It will be called to initialize a module. Usually, this amounts to setting the module's initial attributes. The "module" argument receives the module object to initialize. The function must return 0 on success, or, on error, set an exception and return -1. If PyModuleExec replaces the module's entry in sys.modules, the new object will be used and returned by importlib machinery after all execution slots are processed. This is a feature of the import machinery itself. The slots themselves are all processed using the module returned from the creation phase; sys.modules is not consulted during the execution phase. (Note that for extension modules, implementing Py_mod_create is usually a better solution for using custom module objects.) Legacy Init The backwards-compatible single-phase initialization continues to be supported. In this scheme, the PyInit function returns a fully initialized module rather than a PyModuleDef object. In this case, the PyInit hook implements the creation phase, and the execution phase is a no-op. Modules that need to work unchanged on older versions of Python should stick to single-phase initialization, because the benefits it brings can't be back-ported. Here is an example of a module that supports multi-phase initialization, and falls back to single-phase when compiled for an older version of CPython. It is included mainly as an illustration of the changes needed to enable multi-phase init: #include <Python.h> static int spam_exec(PyObject *module) { PyModule_AddStringConstant(module, "food", "spam"); return 0; } #ifdef Py_mod_exec static PyModuleDef_Slot spam_slots[] = { {Py_mod_exec, spam_exec}, {0, NULL} }; #endif static PyModuleDef spam_def = { PyModuleDef_HEAD_INIT, /* m_base */ "spam", /* m_name */ PyDoc_STR("Utilities for cooking spam"), /* m_doc */ 0, /* m_size */ NULL, /* m_methods */ #ifdef Py_mod_exec spam_slots, /* m_slots */ #else NULL, #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; PyMODINIT_FUNC PyInit_spam(void) { #ifdef Py_mod_exec return PyModuleDef_Init(&spam_def); #else PyObject *module; module = PyModule_Create(&spam_def); if (module == NULL) return NULL; if (spam_exec(module) != 0) { Py_DECREF(module); return NULL; } return module; #endif } Built-In modules Any extension module can be used as a built-in module by linking it into the executable, and including it in the inittab (either at runtime with PyImport_AppendInittab, or at configuration time, using tools like freeze). To keep this possibility, all changes to extension module loading introduced in this PEP will also apply to built-in modules. The only exception is non-ASCII module names, explained below. 
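The create/exec split mirrors the loader protocol that PEP 451 defines for Python-level loaders. The following is a small pure-Python sketch of the same two phases, using only public importlib APIs (illustrative only; the loader class and the "spam_demo" name are invented for the example):

    import importlib.abc
    import importlib.util
    import types

    class TwoPhaseLoader(importlib.abc.Loader):
        def create_module(self, spec):
            # Phase 1 -- the analogue of Py_mod_create: make an empty module.
            # (Returning None would tell the machinery to create a default one.)
            return types.ModuleType(spec.name)

        def exec_module(self, module):
            # Phase 2 -- the analogue of Py_mod_exec: populate the module.
            module.food = "spam"

    spec = importlib.util.spec_from_loader("spam_demo", TwoPhaseLoader())
    module = importlib.util.module_from_spec(spec)   # calls create_module
    spec.loader.exec_module(module)                  # runs the exec phase
    print(module.food)                               # spam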
Subinterpreters and Interpreter Reloading Extensions using the new initialization scheme are expected to support subinterpreters and multiple Py_Initialize/Py_Finalize cycles correctly, avoiding the issues mentioned in Python documentation[1]. The mechanism is designed to make this easy, but care is still required on the part of the extension author. No user-defined functions, methods, or instances may leak to different interpreters. To achieve this, all module-level state should be kept in either the module dict, or in the module object's storage reachable by PyModule_GetState. A simple rule of thumb is: Do not define any static data, except built-in types with no mutable or user-settable class attributes. Functions incompatible with multi-phase initialization The PyModule_Create function will fail when used on a PyModuleDef structure with a non-NULL m_slots pointer. The function doesn't have access to the ModuleSpec object necessary for multi-phase initialization. The PyState_FindModule function will return NULL, and PyState_AddModule and PyState_RemoveModule will also fail on modules with non-NULL m_slots. PyState registration is disabled because multiple module objects may be created from the same PyModuleDef. Module state and C-level callbacks Due to the unavailability of PyState_FindModule, any function that needs access to module-level state (including functions, classes or exceptions defined at the module level) must receive a reference to the module object (or the particular object it needs), either directly or indirectly. This is currently difficult in two situations: - Methods of classes, which receive a reference to the class, but not to the class's module - Libraries with C-level callbacks, unless the callbacks can receive custom data set at callback registration Fixing these cases is outside of the scope of this PEP, but will be needed for the new mechanism to be useful to all modules. Proper fixes have been discussed on the import-sig mailing list[2]. As a rule of thumb, modules that rely on PyState_FindModule are, at the moment, not good candidates for porting to the new mechanism. New Functions A new function and macro implementing the module creation phase will be added. These are similar to PyModule_Create and PyModule_Create2, except they take an additional ModuleSpec argument, and handle module definitions with non-NULL slots: PyObject * PyModule_FromDefAndSpec(PyModuleDef *def, PyObject *spec) PyObject * PyModule_FromDefAndSpec2(PyModuleDef *def, PyObject *spec, int module_api_version) A new function implementing the module execution phase will be added. This allocates per-module state (if not allocated already), and always processes execution slots. The import machinery calls this method when a module is executed, unless the module is being reloaded: PyAPI_FUNC(int) PyModule_ExecDef(PyObject *module, PyModuleDef *def) Another function will be introduced to initialize a PyModuleDef object. This idempotent function fills in the type, refcount, and module index. It returns its argument cast to PyObject*, so it can be returned directly from a PyInit function: PyObject * PyModuleDef_Init(PyModuleDef *); Additionally, two helpers will be added for setting the docstring and methods on a module: int PyModule_SetDocString(PyObject *, const char *) int PyModule_AddFunctions(PyObject *, PyMethodDef *) Export Hook Name As portable C identifiers are limited to ASCII, module names must be encoded to form the PyInit hook name. 
For ASCII module names, the import hook is named PyInit_<modulename>, where <modulename> is the name of the module.

For module names containing non-ASCII characters, the import hook is named PyInitU_<encodedname>, where the name is encoded using CPython's "punycode" encoding (Punycode, RFC 3492, with a lowercase suffix), with hyphens ("-") replaced by underscores ("_").

In Python:

    def export_hook_name(name):
        try:
            suffix = b'_' + name.encode('ascii')
        except UnicodeEncodeError:
            suffix = b'U_' + name.encode('punycode').replace(b'-', b'_')
        return b'PyInit' + suffix

Examples:

    +-------------+---------------------+
    | Module name | Init hook name      |
    +=============+=====================+
    | spam        | PyInit_spam         |
    +-------------+---------------------+
    | lančmít     | PyInitU_lanmt_2sa6t |
    +-------------+---------------------+
    | スパム      | PyInitU_zck5b2b     |
    +-------------+---------------------+

For modules with non-ASCII names, single-phase initialization is not supported.

In the initial implementation of this PEP, built-in modules with non-ASCII names will not be supported.

Module Reloading

Reloading an extension module using importlib.reload() will continue to have no effect, except re-setting import-related attributes. Due to limitations in shared library loading (both dlopen on POSIX and LoadLibraryEx on Windows), it is not generally possible to load a modified library after it has changed on disk. Use cases for reloading other than trying out a new version of the module are too rare to require all module authors to keep reloading in mind. If reload-like functionality is needed, authors can export a dedicated function for it.

Multiple modules in one library

To support multiple Python modules in one shared library, the library can export additional PyInit* symbols besides the one that corresponds to the library's filename. Note that this mechanism can currently only be used to load extra modules, but not to find them. (This is a limitation of the loader mechanism, which this PEP does not try to modify.) To work around the lack of a suitable finder, code like the following can be used:

    import importlib.machinery
    import importlib.util
    loader = importlib.machinery.ExtensionFileLoader(name, path)
    spec = importlib.util.spec_from_loader(name, loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    return module

On platforms that support symbolic links, these may be used to install one library under multiple names, exposing all exported modules to normal import machinery.

Testing and initial implementations

For testing, a new built-in module _testmultiphase will be created. The library will export several additional modules using the mechanism described in "Multiple modules in one library".

The _testcapi module will be unchanged, and will use single-phase initialization indefinitely (or until it is no longer supported).

The array and xx* modules will be converted to use multi-phase initialization as part of the initial implementation.

Summary of API Changes and Additions

New functions:

- PyModule_FromDefAndSpec (macro)
- PyModule_FromDefAndSpec2
- PyModule_ExecDef
- PyModule_SetDocString
- PyModule_AddFunctions
- PyModuleDef_Init

New macros:

- Py_mod_create
- Py_mod_exec

New types:

- PyModuleDef_Type will be exposed

New structures:

- PyModuleDef_Slot

Other changes:

PyModuleDef.m_reload changes to PyModuleDef.m_slots.

BuiltinImporter and ExtensionFileLoader will now implement create_module and exec_module.
The internal _imp module will have backwards incompatible changes: create_builtin, create_dynamic, and exec_dynamic will be added; init_builtin and load_dynamic will be removed.

The undocumented functions imp.load_dynamic and imp.init_builtin will be replaced by backwards-compatible shims.

Backwards Compatibility

Existing modules will continue to be source- and binary-compatible with new versions of Python. Modules that use multi-phase initialization will not be compatible with versions of Python that do not implement this PEP.

The functions init_builtin and load_dynamic will be removed from the _imp module (but not from the imp module).

All changed loaders (BuiltinImporter and ExtensionFileLoader) will remain backwards-compatible; the load_module method will be replaced by a shim.

Internal functions of Python/import.c and Python/importdl.c will be removed. (Specifically, these are _PyImport_GetDynLoadFunc, _PyImport_GetDynLoadWindows, and _PyImport_LoadDynamicModule.)

Possible Future Extensions

The slots mechanism, inspired by PyType_Slot from PEP 384, allows later extensions.

Some extension modules export many constants; for example, _ssl has a long list of calls in the form:

    PyModule_AddIntConstant(m, "SSL_ERROR_ZERO_RETURN",
                            PY_SSL_ERROR_ZERO_RETURN);

Converting this to a declarative list, similar to PyMethodDef, would reduce boilerplate, and provide free error-checking which is often missing. String constants and types can be handled similarly. (Note that non-default bases for types cannot be portably specified statically; this case would need a Py_mod_exec function that runs before the slots are added. The free error-checking would still be beneficial, though.)

Another possibility is providing a "main" function that would be run when the module is given to Python's -m switch. For this to work, the runpy module will need to be modified to take advantage of ModuleSpec-based loading introduced in PEP 451. Also, it will be necessary to add a mechanism for setting up a module according to slots it wasn't originally defined with.

Implementation

Work-in-progress implementation is available in a Github repository[3]; a patchset is at [4].

Previous Approaches

Stefan Behnel's initial proto-PEP[5] had a "PyInit_modulename" hook that would create a module class, whose __init__ would then be called to create the module. This proposal did not correspond to the (then nonexistent) PEP 451, where module creation and initialization is broken into distinct steps. It also did not support loading an extension into pre-existing module objects.

Alyssa (Nick) Coghlan proposed "Create" and "Exec" hooks, and wrote a prototype implementation[6]. At this time PEP 451 was still not implemented, so the prototype does not use ModuleSpec.

The original version of this PEP used Create and Exec hooks, and allowed loading into arbitrary pre-constructed objects with the Exec hook. The proposal made extension module initialization closer to how Python modules are initialized, but it was later recognized that this isn't an important goal. The current PEP describes a simpler solution.

A further iteration used a "PyModuleExport" hook as an alternative to PyInit, where PyInit was used for the existing scheme, and PyModuleExport for multi-phase. However, not being able to determine the hook name based on the module name complicated automatic generation of PyImport_Inittab by tools like freeze. Keeping only the PyInit hook name, even if it's not entirely appropriate for exporting a definition, yielded a much simpler solution.
References Copyright This document has been placed in the public domain. [1] https://docs.python.org/3/c-api/init.html#sub-interpreter-support [2] https://mail.python.org/pipermail/import-sig/2015-April/000959.html [3] https://github.com/encukou/cpython/commits/pep489 [4] https://github.com/encukou/cpython/compare/master...encukou:pep489.patch [5] https://mail.python.org/pipermail/python-dev/2013-August/128087.html [6] https://mail.python.org/pipermail/python-dev/2013-August/128101.html
python-peps
2024-10-18T13:23:31.682327
2013-08-11T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0489/", "authors": [ "Petr Viktorin" ], "pep_number": "0489", "pandoc_version": "3.5" }
0508
PEP: 508 Title: Dependency specification for Python Software Packages Version: $Revision$ Last-Modified: $Date$ Author: Robert Collins <[email protected]> BDFL-Delegate: Donald Stufft <[email protected]> Discussions-To: [email protected] Status: Final Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 11-Nov-2015 Post-History: 05-Nov-2015, 16-Nov-2015 Resolution: https://mail.python.org/pipermail/distutils-sig/2015-November/027868.html

Abstract

This PEP specifies the language used to describe dependencies for packages. It draws a border at the edge of describing a single dependency - the different sorts of dependencies and when they should be installed is a higher level problem. The intent is to provide a building block for higher layer specifications.

The job of a dependency is to enable tools like pip[1] to find the right package to install. Sometimes this is very loose - just specifying a name, and sometimes very specific - referring to a specific file to install. Sometimes dependencies are only relevant on one platform, or only some versions are acceptable, so the language permits describing all these cases.

The language defined is a compact line-based format which is already in widespread use in pip requirements files, though we do not specify the command line option handling that those files permit. There is one caveat - the URL reference form, specified in PEP 440, is not actually implemented in pip, but since PEP 440 is accepted, we use that format rather than pip's current native format.

Motivation

Any specification in the Python packaging ecosystem that needs to consume lists of dependencies needs to build on an approved PEP for such, but PEP 426 is mostly aspirational - and there are already existing implementations of the dependency specification which we can instead adopt. The existing implementations are battle proven and user friendly, so adopting them is arguably much better than approving an aspirational, unconsumed, format.

Specification

Examples

All features of the language shown with a name based lookup:

    requests [security,tests] >= 2.8.1, == 2.8.* ; python_version < "2.7"

A minimal URL based lookup:

    pip @ https://github.com/pypa/pip/archive/1.3.1.zip#sha1=da9234ee9982d4bbb3c72346a6de940a148ea686

Concepts

A dependency specification always specifies a distribution name. It may include extras, which expand the dependencies of the named distribution to enable optional features. The version installed can be controlled using version limits, or giving the URL to a specific artifact to install. Finally the dependency can be made conditional using environment markers.

Grammar

We first cover the grammar briefly and then drill into the semantics of each section later.

A distribution specification is written in ASCII text. We use a parsley [2] grammar to provide a precise grammar. It is expected that the specification will be embedded into a larger system which offers framing such as comments, multiple line support via continuations, or other such features.

The full grammar including annotations to build a useful parse tree is included at the end of the PEP.

Versions may be specified according to the PEP 440 rules. (Note: URI is defined in RFC 3986):

    version_cmp   = wsp* '<' | '<=' | '!=' | '==' | '>=' | '>' | '~=' | '==='
    version       = wsp* ( letterOrDigit | '-' | '_' | '.' | '*' | '+' | '!'
                      )+
    version_one   = version_cmp version wsp*
    version_many  = version_one (wsp* ',' version_one)*
    versionspec   = ( '(' version_many ')' ) | version_many
    urlspec       = '@' wsp* <URI_reference>

Environment markers allow making a specification only take effect in some environments:

    marker_op     = version_cmp | (wsp* 'in') | (wsp* 'not' wsp+ 'in')
    python_str_c  = (wsp | letter | digit | '(' | ')' | '.' | '{' | '}' |
                     '-' | '_' | '*' | '#' | ':' | ';' | ',' | '/' | '?' |
                     '[' | ']' | '!' | '~' | '`' | '@' | '$' | '%' | '^' |
                     '&' | '=' | '+' | '|' | '<' | '>' )
    dquote        = '"'
    squote        = '\\''
    python_str    = (squote (python_str_c | dquote)* squote |
                     dquote (python_str_c | squote)* dquote)
    env_var       = ('python_version' | 'python_full_version' |
                     'os_name' | 'sys_platform' | 'platform_release' |
                     'platform_system' | 'platform_version' |
                     'platform_machine' | 'platform_python_implementation' |
                     'implementation_name' | 'implementation_version' |
                     'extra' # ONLY when defined by a containing layer
                     )
    marker_var    = wsp* (env_var | python_str)
    marker_expr   = marker_var marker_op marker_var
                  | wsp* '(' marker wsp* ')'
    marker_and    = marker_expr wsp* 'and' marker_expr
                  | marker_expr
    marker_or     = marker_and wsp* 'or' marker_and
                  | marker_and
    marker        = marker_or
    quoted_marker = ';' wsp* marker

Optional components of a distribution may be specified using the extras field:

    identifier_end = letterOrDigit | (('-' | '_' | '.' )* letterOrDigit)
    identifier     = letterOrDigit identifier_end*
    name           = identifier
    extras_list    = identifier (wsp* ',' wsp* identifier)*
    extras         = '[' wsp* extras_list? wsp* ']'

Giving us a rule for name based requirements:

    name_req = name wsp* extras? wsp* versionspec? wsp* quoted_marker?

And a rule for direct reference specifications:

    url_req = name wsp* extras? wsp* urlspec wsp+ quoted_marker?

Leading to the unified rule that can specify a dependency:

    specification = wsp* ( url_req | name_req ) wsp*

Whitespace

Non line-breaking whitespace is mostly optional with no semantic meaning. The sole exception is detecting the end of a URL requirement.

Names

Python distribution names are currently defined in PEP 345. Names act as the primary identifier for distributions. They are present in all dependency specifications, and are sufficient to be a specification on their own. However, PyPI places strict restrictions on names - they must match a case-insensitive regex or they won't be accepted. Accordingly, in this PEP we limit the acceptable values for identifiers to that regex. A full redefinition of name may take place in a future metadata PEP. The regex (run with re.IGNORECASE) is:

    ^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$

Extras

An extra is an optional part of a distribution. Distributions can specify as many extras as they wish, and each extra results in the declaration of additional dependencies of the distribution when the extra is used in a dependency specification. For instance:

    requests[security]

Extras union in the dependencies they define with the dependencies of the distribution they are attached to. The example above would result in requests being installed, and requests' own dependencies, and also any dependencies that are listed in the "security" extra of requests.

If multiple extras are listed, all the dependencies are unioned together.

Versions

See PEP 440 for more detail on both version numbers and version comparisons. Version specifications limit the versions of a distribution that can be used. They only apply to distributions looked up by name, rather than via a URL.
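For illustration (outside the specification itself), the third-party packaging project implements this grammar; a small sketch of inspecting the parts described above, assuming packaging is installed:

    from packaging.requirements import Requirement

    req = Requirement(
        'requests[security] >= 2.8.1, == 2.8.* ; python_version < "2.7"')
    print(req.name)       # requests
    print(req.extras)     # {'security'}
    print(req.specifier)  # the combined version constraints
    print(req.marker)     # python_version < "2.7"
    print(req.marker.evaluate())  # False on any modern interpreter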
Version comparisons are also used in the markers feature. The optional brackets around a version are present for compatibility with PEP 345 but should not be generated, only accepted.

Environment Markers

Environment markers allow a dependency specification to provide a rule that describes when the dependency should be used. For instance, consider a package that needs argparse. In Python 2.7 argparse is always present. On older Python versions it has to be installed as a dependency. This can be expressed as so:

    argparse;python_version<"2.7"

A marker expression evaluates to either True or False. When it evaluates to False, the dependency specification should be ignored.

The marker language is inspired by Python itself, chosen for the ability to safely evaluate it without running arbitrary code that could become a security vulnerability. Markers were first standardised in PEP 345. This PEP fixes some issues that were observed in the design described in PEP 426.

Comparisons in marker expressions are typed by the comparison operator. The <marker_op> operators that are not in <version_cmp> perform the same as they do for strings in Python. The <version_cmp> operators use the PEP 440 version comparison rules when those are defined (that is, when both sides have a valid version specifier). If there is no defined PEP 440 behaviour and the operator exists in Python, then the operator falls back to the Python behaviour. Otherwise an error should be raised. e.g. the following will result in errors:

    "dog" ~= "fred"
    python_version ~= "surprise"

User supplied constants are always encoded as strings with either ' or " quote marks. Note that backslash escapes are not defined, but existing implementations do support them. They are not included in this specification because they add complexity and there is no observable need for them today. Similarly we do not define non-ASCII character support: all the runtime variables we are referencing are expected to be ASCII-only.

The variables in the marker grammar such as "os_name" resolve to values looked up in the Python runtime. With the exception of "extra" all values are defined on all Python versions today - it is an error in the implementation of markers if a value is not defined.

Unknown variables must raise an error rather than resulting in a comparison that evaluates to True or False.

Variables whose value cannot be calculated on a given Python implementation should evaluate to 0 for versions, and an empty string for all other variables.

The "extra" variable is special. It is used by wheels to signal which specifications apply to a given extra in the wheel METADATA file, but since the METADATA file is based on a draft version of PEP 426, there is no current specification for this. Regardless, outside of a context where this special handling is taking place, the "extra" variable should result in an error like all other unknown variables.
The marker variables, their Python equivalents, and sample values:

- os_name: os.name -- sample values: posix, java
- sys_platform: sys.platform -- sample values: linux, linux2, darwin, java1.8.0_51 (note that "linux" is from Python3 and "linux2" from Python2)
- platform_machine: platform.machine() -- sample values: x86_64
- platform_python_implementation: platform.python_implementation() -- sample values: CPython, Jython
- platform_release: platform.release() -- sample values: 3.14.1-x86_64-linode39, 14.5.0, 1.8.0_51
- platform_system: platform.system() -- sample values: Linux, Windows, Java
- platform_version: platform.version() -- sample values: "#1 SMP Fri Apr 25 13:07:35 EDT 2014", "Java HotSpot(TM) 64-Bit Server VM, 25.51-b03, Oracle Corporation", "Darwin Kernel Version 14.5.0: Wed Jul 29 02:18:53 PDT 2015; root:xnu-2782.40.9~2/RELEASE_X86_64"
- python_version: '.'.join(platform.python_version_tuple()[:2]) -- sample values: 3.4, 2.7
- python_full_version: platform.python_version() -- sample values: 3.4.0, 3.5.0b1
- implementation_name: sys.implementation.name -- sample values: cpython
- implementation_version: see definition below -- sample values: 3.4.0, 3.5.0b1
- extra: an error except when defined by the context interpreting the specification -- sample value: test

The implementation_version marker variable is derived from sys.implementation.version:

    def format_full_version(info):
        version = '{0.major}.{0.minor}.{0.micro}'.format(info)
        kind = info.releaselevel
        if kind != 'final':
            version += kind[0] + str(info.serial)
        return version

    if hasattr(sys, 'implementation'):
        implementation_version = format_full_version(sys.implementation.version)
    else:
        implementation_version = "0"

Backwards Compatibility

Most of this PEP is already widely deployed and thus offers no compatibility concerns.

There are however a few points where the PEP differs from the deployed base.

Firstly, PEP 440 direct references haven't actually been deployed in the wild, but they were designed to be compatibly added, and there are no known obstacles to adding them to pip or other tools that consume the existing dependency metadata in distributions - particularly since they won't be permitted to be present in PyPI uploaded distributions anyway.

Secondly, PEP 426 markers, which have had some reasonable deployment, particularly in wheels and pip, will handle version comparisons with python_full_version "2.7.10" differently. Specifically, in 426 "2.7.10" is less than "2.7.9". This backward incompatibility is deliberate. We are also defining new operators - "~=" and "===" - and new variables - platform_release, platform_system, implementation_name, and implementation_version - which are not present in older marker implementations. The variables will error on those implementations. Users of both features will need to make a judgement as to when support has become sufficiently widespread in the ecosystem that using them will not cause compatibility issues.

Thirdly, PEP 345 required brackets around version specifiers. In order to accept PEP 345 dependency specifications, brackets are accepted, but they should not be generated.

Rationale

In order to move forward with any new PEPs that depend on environment markers, we needed a specification that included them in their modern form. This PEP brings together all the currently unspecified components into a specified form.
The requirement specifier was adopted from the EBNF in the setuptools pkg_resources documentation, since we wish to avoid depending on a de facto, vs PEP specified, standard. Complete Grammar The complete parsley grammar: wsp = ' ' | '\t' version_cmp = wsp* <'<=' | '<' | '!=' | '==' | '>=' | '>' | '~=' | '==='> version = wsp* <( letterOrDigit | '-' | '_' | '.' | '*' | '+' | '!' )+> version_one = version_cmp:op version:v wsp* -> (op, v) version_many = version_one:v1 (wsp* ',' version_one)*:v2 -> [v1] + v2 versionspec = ('(' version_many:v ')' ->v) | version_many urlspec = '@' wsp* <URI_reference> marker_op = version_cmp | (wsp* 'in') | (wsp* 'not' wsp+ 'in') python_str_c = (wsp | letter | digit | '(' | ')' | '.' | '{' | '}' | '-' | '_' | '*' | '#' | ':' | ';' | ',' | '/' | '?' | '[' | ']' | '!' | '~' | '`' | '@' | '$' | '%' | '^' | '&' | '=' | '+' | '|' | '<' | '>' ) dquote = '"' squote = '\\'' python_str = (squote <(python_str_c | dquote)*>:s squote | dquote <(python_str_c | squote)*>:s dquote) -> s env_var = ('python_version' | 'python_full_version' | 'os_name' | 'sys_platform' | 'platform_release' | 'platform_system' | 'platform_version' | 'platform_machine' | 'platform_python_implementation' | 'implementation_name' | 'implementation_version' | 'extra' # ONLY when defined by a containing layer ):varname -> lookup(varname) marker_var = wsp* (env_var | python_str) marker_expr = marker_var:l marker_op:o marker_var:r -> (o, l, r) | wsp* '(' marker:m wsp* ')' -> m marker_and = marker_expr:l wsp* 'and' marker_expr:r -> ('and', l, r) | marker_expr:m -> m marker_or = marker_and:l wsp* 'or' marker_and:r -> ('or', l, r) | marker_and:m -> m marker = marker_or quoted_marker = ';' wsp* marker identifier_end = letterOrDigit | (('-' | '_' | '.' )* letterOrDigit) identifier = < letterOrDigit identifier_end* > name = identifier extras_list = identifier:i (wsp* ',' wsp* identifier)*:ids -> [i] + ids extras = '[' wsp* extras_list?:e wsp* ']' -> e name_req = (name:n wsp* extras?:e wsp* versionspec?:v wsp* quoted_marker?:m -> (n, e or [], v or [], m)) url_req = (name:n wsp* extras?:e wsp* urlspec:v (wsp+ | end) quoted_marker?:m -> (n, e or [], v, m)) specification = wsp* ( url_req | name_req ):s wsp* -> s # The result is a tuple - name, list-of-extras, # list-of-version-constraints-or-a-url, marker-ast or None URI_reference = <URI | relative_ref> URI = scheme ':' hier_part ('?' query )? ( '#' fragment)? hier_part = ('//' authority path_abempty) | path_absolute | path_rootless | path_empty absolute_URI = scheme ':' hier_part ( '?' query )? relative_ref = relative_part ( '?' query )? ( '#' fragment )? relative_part = '//' authority path_abempty | path_absolute | path_noscheme | path_empty scheme = letter ( letter | digit | '+' | '-' | '.')* authority = ( userinfo '@' )? host ( ':' port )? userinfo = ( unreserved | pct_encoded | sub_delims | ':')* host = IP_literal | IPv4address | reg_name port = digit* IP_literal = '[' ( IPv6address | IPvFuture) ']' IPvFuture = 'v' hexdig+ '.' ( unreserved | sub_delims | ':')+ IPv6address = ( ( h16 ':'){6} ls32 | '::' ( h16 ':'){5} ls32 | ( h16 )? '::' ( h16 ':'){4} ls32 | ( ( h16 ':')? h16 )? '::' ( h16 ':'){3} ls32 | ( ( h16 ':'){0,2} h16 )? '::' ( h16 ':'){2} ls32 | ( ( h16 ':'){0,3} h16 )? '::' h16 ':' ls32 | ( ( h16 ':'){0,4} h16 )? '::' ls32 | ( ( h16 ':'){0,5} h16 )? '::' h16 | ( ( h16 ':'){0,6} h16 )? '::' ) h16 = hexdig{1,4} ls32 = ( h16 ':' h16) | IPv4address IPv4address = dec_octet '.' dec_octet '.' dec_octet '.' 
dec_octet nz = ~'0' digit dec_octet = ( digit # 0-9 | nz digit # 10-99 | '1' digit{2} # 100-199 | '2' ('0' | '1' | '2' | '3' | '4') digit # 200-249 | '25' ('0' | '1' | '2' | '3' | '4' | '5') )# %250-255 reg_name = ( unreserved | pct_encoded | sub_delims)* path = ( path_abempty # begins with '/' or is empty | path_absolute # begins with '/' but not '//' | path_noscheme # begins with a non-colon segment | path_rootless # begins with a segment | path_empty ) # zero characters path_abempty = ( '/' segment)* path_absolute = '/' ( segment_nz ( '/' segment)* )? path_noscheme = segment_nz_nc ( '/' segment)* path_rootless = segment_nz ( '/' segment)* path_empty = pchar{0} segment = pchar* segment_nz = pchar+ segment_nz_nc = ( unreserved | pct_encoded | sub_delims | '@')+ # non-zero-length segment without any colon ':' pchar = unreserved | pct_encoded | sub_delims | ':' | '@' query = ( pchar | '/' | '?')* fragment = ( pchar | '/' | '?')* pct_encoded = '%' hexdig unreserved = letter | digit | '-' | '.' | '_' | '~' reserved = gen_delims | sub_delims gen_delims = ':' | '/' | '?' | '#' | '(' | ')?' | '@' sub_delims = '!' | '$' | '&' | '\\'' | '(' | ')' | '*' | '+' | ',' | ';' | '=' hexdig = digit | 'a' | 'A' | 'b' | 'B' | 'c' | 'C' | 'd' | 'D' | 'e' | 'E' | 'f' | 'F' A test program - if the grammar is in a string grammar: import os import sys import platform from parsley import makeGrammar grammar = """ wsp ... """ tests = [ "A", "A.B-C_D", "aa", "name", "name<=1", "name>=3", "name>=3,<2", "name@http://foo.com", "name [fred,bar] @ http://foo.com ; python_version=='2.7'", "name[quux, strange];python_version<'2.7' and platform_version=='2'", "name; os_name=='a' or os_name=='b'", # Should parse as (a and b) or c "name; os_name=='a' and os_name=='b' or os_name=='c'", # Overriding precedence -> a and (b or c) "name; os_name=='a' and (os_name=='b' or os_name=='c')", # should parse as a or (b and c) "name; os_name=='a' or os_name=='b' and os_name=='c'", # Overriding precedence -> (a or b) and c "name; (os_name=='a' or os_name=='b') and os_name=='c'", ] def format_full_version(info): version = '{0.major}.{0.minor}.{0.micro}'.format(info) kind = info.releaselevel if kind != 'final': version += kind[0] + str(info.serial) return version if hasattr(sys, 'implementation'): implementation_version = format_full_version(sys.implementation.version) implementation_name = sys.implementation.name else: implementation_version = '0' implementation_name = '' bindings = { 'implementation_name': implementation_name, 'implementation_version': implementation_version, 'os_name': os.name, 'platform_machine': platform.machine(), 'platform_python_implementation': platform.python_implementation(), 'platform_release': platform.release(), 'platform_system': platform.system(), 'platform_version': platform.version(), 'python_full_version': platform.python_version(), 'python_version': '.'.join(platform.python_version_tuple()[:2]), 'sys_platform': sys.platform, } compiled = makeGrammar(grammar, {'lookup': bindings.__getitem__}) for test in tests: parsed = compiled(test).specification() print("%s -> %s" % (test, parsed)) Summary of changes to PEP 508 The following changes were made to this PEP based on feedback after its initial implementation: - The definition of python_version was changed from platform.python_version()[:3] to '.'.join(platform.python_version_tuple()[:2]), to accommodate potential future versions of Python with 2-digit major and minor versions (e.g. 
3.10).[3] References Copyright This document has been placed in the public domain. Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End: [1] pip, the recommended installer for Python packages (http://pip.readthedocs.org/en/stable/) [2] The parsley PEG library. (https://pypi.python.org/pypi/parsley/) [3] Future Python versions might be problematic with the definition of Environment Marker Variable python_version (https://github.com/python/peps/issues/560)
python-peps
2024-10-18T13:23:31.707565
2015-11-11T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0508/", "authors": [ "Robert Collins" ], "pep_number": "0508", "pandoc_version": "3.5" }
0713
PEP: 713 Title: Callable Modules Author: Amethyst Reese <amethyst at n7.gg> Sponsor: Łukasz Langa <lukasz at python.org> Discussions-To: https://discuss.python.org/t/pep-713-callable-modules/26127 Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 20-Apr-2023 Python-Version: 3.12 Post-History: 23-Apr-2023 Resolution: https://discuss.python.org/t/26127/86

Rejection Notice

The Steering Council didn't feel that there was a compelling reason to have this PEP, even though it clearly could be done from a consistency point of view. If this idea comes up again in the future, this is a useful prior discussion to refer to.

Abstract

Modules are currently not directly callable. Classes can define a __call__ method that makes instance objects callable, but defining a similarly named function in the global module scope has no effect, and that function can only be called by importing or referencing it directly as module.__call__. PEP 562 added support for __getattr__ and __dir__ for modules, but defining __getattr__ to return a value for __call__ still does not make a module callable.

This PEP proposes support for making modules directly callable by defining a __call__ object in the module's global namespace, either as a standard function, or an arbitrary callable object.

Motivation

Many modules have only a single primary interface to their functionality. In many cases, that interface is a single callable object, where being able to import and use the module directly as a callable provides a more "Pythonic" interface for users:

    # user.py
    import fancy

    @fancy
    def func(...):
        ...

Currently, providing this style of interface requires modifying the module object at runtime to make it callable. This is commonly done by replacing the module object in sys.modules with a callable alternative (such as a function or class instance):

    # fancy.py
    def fancy(...):
        ...

    sys.modules[__name__] = fancy

This has the effect of making the original module effectively unreachable without further hooks from the author, even with from module import member. It also results in a "module" object that is missing all of the special module attributes, including __doc__, __package__, __path__, etc.

Alternatively, a module author can choose to override the module's __class__ property with a custom type that provides a callable interface:

    # fancy.py
    def fancy(...):
        ...

    class FancyModule(types.ModuleType):
        def __call__(self, ...):
            return fancy(...)

    sys.modules[__name__].__class__ = FancyModule

The downside of either approach is that it not only results in extra boilerplate, but also results in type checker failures because they don't recognize that the module is callable at runtime:

    $ mypy user.py
    user.py:3: error: Module not callable [operator]
    Found 1 error in 1 file (checked 1 source file)
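A small sketch of the first downside in action, using the hypothetical fancy.py above (the sys.modules replacement variant):

    import fancy

    print(type(fancy))   # <class 'function'> -- no longer a module object
    fancy.__package__    # raises AttributeError: functions lack module attributes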
All of these examples would be considered valid, callable modules:

    # hello.py
    def __call__(...):
        pass

    # hello.py
    class Hello:
        pass

    __call__ = Hello

    # hello.py
    def hello():
        pass

    def __getattr__(name):
        if name == "__call__":
            return hello

The first two styles should generally be preferred, as they allow for easier static analysis from tools like type checkers, though the third form would be allowed in order to make the implementation more consistent. The intent is to allow an arbitrary callable object to be assigned to the module's __call__ property or returned by the module's __getattr__ method, enabling module authors to pick the most suitable mechanism for making their module callable by users.

Backwards Compatibility and Impact on Performance

This PEP is not expected to cause any backwards incompatibility. Any modules that already contain a __call__ object will continue to function the same as before, though with the additional ability to be called directly. It is considered unlikely that modules with an existing __call__ object would depend on the existing behavior of raising TypeError when called.

Performance implications of this PEP are minimal, as it defines a new interface. Calling a module would trigger a lookup for the name __call__ on a module object. Existing workarounds for creating callable modules already depend on this behavior for generic objects, resulting in similar performance for these callable modules.

Type checkers will likely need to be updated accordingly to treat modules with a __call__ object as callable. This should be possible to support in type checkers when checking code targeted at older Python versions that do not support callable modules, with the expectation that these modules would also include one of the workarounds mentioned earlier to make the module callable.

How to Teach This

The documentation for callable types will include modules in the list, with a link to object.__call__. The callable-types documentation will include a section covering callable modules, with example code, similar to the section for customizing module attribute access.

Reference Implementation

The proposed implementation of callable modules is available in CPython PR #103742.

Rejected Ideas

Given the introduction of __getattr__ and __dir__, and the proposal to enable use of __call__, it was considered whether it was worth allowing use of all special names for modules, such as __or__ and __iter__. While this would not be completely undesired, it increases the potential for backward compatibility concerns, and these other special methods are likely to provide less utility to library authors in comparison to __call__.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 418
Title: Add monotonic time, performance counter, and process time functions
Author: Cameron Simpson <[email protected]>, Jim J. Jewett <[email protected]>, Stephen J. Turnbull <[email protected]>, Victor Stinner <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 26-Mar-2012
Python-Version: 3.3

Abstract

This PEP proposes to add time.get_clock_info(name), time.monotonic(), time.perf_counter() and time.process_time() functions to Python 3.3.

Rationale

If a program uses the system time to schedule events or to implement a timeout, it may fail to run events at the right moment, or stop the timeout too early or too late, when the system time is changed manually or adjusted automatically by NTP. A monotonic clock should be used instead, so as not to be affected by system time updates: time.monotonic().

To measure the performance of a function, time.clock() can be used, but it behaves very differently on Windows and on Unix. On Windows, time.clock() includes time elapsed during sleep, whereas it does not on Unix. time.clock() resolution is very good on Windows, but very bad on Unix. The new time.perf_counter() function should be used instead: it always provides the most precise performance counter available, with portable behaviour (e.g. it includes time spent during sleep).

Until now, Python did not directly provide a portable function to measure CPU time. time.clock() can be used on Unix, but it has bad resolution. resource.getrusage() or os.times() can also be used on Unix, but they require computing the sum of time spent in kernel space and user space. The new time.process_time() function acts as a portable counter that always measures CPU time (excluding time elapsed during sleep) and has the best available resolution.

Each operating system implements clocks and performance counters differently, and it is useful to know exactly which function is used and some properties of the clock, like its resolution. The new time.get_clock_info() function gives access to all available information about each Python time function.

New functions:

- time.monotonic(): timeout and scheduling, not affected by system clock updates
- time.perf_counter(): benchmarking, most precise clock for short periods
- time.process_time(): profiling, CPU time of the process

Users of new functions:

- time.monotonic(): concurrent.futures, multiprocessing, queue, subprocess, telnet and threading modules to implement timeouts
- time.perf_counter(): trace and timeit modules, pybench program
- time.process_time(): profile module
- time.get_clock_info(): pybench program to display information about the timer, like its resolution

The time.clock() function is deprecated because it is not portable: it behaves differently depending on the operating system. time.perf_counter() or time.process_time() should be used instead, depending on your requirements. time.clock() is marked as deprecated but is not planned for removal.
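The rationale's first point is easy to demonstrate: a timeout implemented on time.monotonic() keeps its deadline even if the system clock is stepped mid-wait, which a time.time()-based deadline does not. A minimal sketch (wait_for is a hypothetical helper, not part of the PEP):

    import time

    def wait_for(predicate, timeout=5.0, interval=0.1):
        # Deadline computed on the monotonic clock: immune to manual
        # clock changes and NTP steps, unlike one based on time.time().
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return False

    # Example: poll a condition for up to 2 seconds.
    print(wait_for(lambda: False, timeout=2.0))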
Limitations:

- The behaviour of clocks after a system suspend is not defined in the documentation of the new functions. The behaviour depends on the operating system: see the Monotonic Clocks section below. Some recent operating systems provide two clocks, one including time elapsed during system suspend, one not including this time. Most operating systems only provide one kind of clock.
- time.monotonic() and time.perf_counter() may or may not be adjusted. For example, CLOCK_MONOTONIC is slewed on Linux, whereas GetTickCount() is not adjusted on Windows. time.get_clock_info('monotonic')['adjustable'] can be used to check whether the monotonic clock is adjustable or not.
- No time.thread_time() function is proposed by this PEP because it is neither needed by the Python standard library nor a commonly requested feature. Such a function would only be available on Windows and Linux. On Linux, it is possible to use time.clock_gettime(CLOCK_THREAD_CPUTIME_ID). On Windows, ctypes or another module can be used to call the GetThreadTimes() function.

Python functions

New Functions

time.get_clock_info(name)

Get information on the specified clock. Supported clock names:

- "clock": time.clock()
- "monotonic": time.monotonic()
- "perf_counter": time.perf_counter()
- "process_time": time.process_time()
- "time": time.time()

Return a time.clock_info object which has the following attributes:

- implementation (str): name of the underlying operating system function. Examples: "QueryPerformanceCounter()", "clock_gettime(CLOCK_REALTIME)".
- monotonic (bool): True if the clock cannot go backward.
- adjustable (bool): True if the clock can be changed automatically (e.g. by an NTP daemon) or manually by the system administrator, False otherwise.
- resolution (float): resolution in seconds of the clock.

time.monotonic()

Monotonic clock, i.e. a clock that cannot go backward. It is not affected by system clock updates. The reference point of the returned value is undefined, so that only the difference between the results of consecutive calls is valid and is a number of seconds.

On Windows versions older than Vista, time.monotonic() detects GetTickCount() integer overflow (32 bits, roll-over after 49.7 days). It increases an internal epoch (reference time) by 2**32 each time an overflow is detected. The epoch is stored in process-local state, and so the value of time.monotonic() may be different in two Python processes running for more than 49 days. On more recent versions of Windows and on other operating systems, time.monotonic() is system-wide.

Availability: Windows, Mac OS X, Linux, FreeBSD, OpenBSD, Solaris. Not available on GNU/Hurd.

Pseudo-code[1]:

    if os.name == 'nt':
        # GetTickCount64() requires Windows Vista, Server 2008 or later
        if hasattr(_time, 'GetTickCount64'):
            def monotonic():
                return _time.GetTickCount64() * 1e-3
        else:
            def monotonic():
                ticks = _time.GetTickCount()
                if ticks < monotonic.last:
                    # Integer overflow detected
                    monotonic.delta += 2**32
                monotonic.last = ticks
                return (ticks + monotonic.delta) * 1e-3
            monotonic.last = 0
            monotonic.delta = 0
    elif sys.platform == 'darwin':
        def monotonic():
            if monotonic.factor is None:
                timebase = _time.mach_timebase_info()
                monotonic.factor = timebase[0] / timebase[1] * 1e-9
            return _time.mach_absolute_time() * monotonic.factor
        monotonic.factor = None
    elif hasattr(time, "clock_gettime") and hasattr(time, "CLOCK_HIGHRES"):
        def monotonic():
            return time.clock_gettime(time.CLOCK_HIGHRES)
    elif hasattr(time, "clock_gettime") and hasattr(time, "CLOCK_MONOTONIC"):
        def monotonic():
            return time.clock_gettime(time.CLOCK_MONOTONIC)

On Windows, QueryPerformanceCounter() is not used even though it has a better resolution than GetTickCount(): it is not reliable and has too many issues.
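As shipped in Python 3.3, time.get_clock_info() reports exactly the properties listed above; for example, inspecting the monotonic clock (output values vary by platform):

    import time

    info = time.get_clock_info("monotonic")
    # clock_info attributes: implementation, monotonic, adjustable, resolution
    print("implementation:", info.implementation)
    print("monotonic:", info.monotonic)
    print("adjustable:", info.adjustable)
    print("resolution:", info.resolution)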
time.perf_counter()

Performance counter with the highest available resolution to measure a short duration. It does include time elapsed during sleep and is system-wide. The reference point of the returned value is undefined, so that only the difference between the results of consecutive calls is valid and is a number of seconds. It is available on all platforms.

Pseudo-code:

    if os.name == 'nt':
        def _win_perf_counter():
            if _win_perf_counter.frequency is None:
                _win_perf_counter.frequency = _time.QueryPerformanceFrequency()
            return _time.QueryPerformanceCounter() / _win_perf_counter.frequency
        _win_perf_counter.frequency = None

    def perf_counter():
        if perf_counter.use_performance_counter:
            try:
                return _win_perf_counter()
            except OSError:
                # QueryPerformanceFrequency() fails if the installed
                # hardware does not support a high-resolution performance
                # counter
                perf_counter.use_performance_counter = False
        if perf_counter.use_monotonic:
            # The monotonic clock is preferred over the system time
            try:
                return time.monotonic()
            except OSError:
                perf_counter.use_monotonic = False
        return time.time()
    perf_counter.use_performance_counter = (os.name == 'nt')
    perf_counter.use_monotonic = hasattr(time, 'monotonic')

time.process_time()

Sum of the system and user CPU time of the current process. It does not include time elapsed during sleep. It is process-wide by definition. The reference point of the returned value is undefined, so that only the difference between the results of consecutive calls is valid. It is available on all platforms.

Pseudo-code[2]:

    if os.name == 'nt':
        def process_time():
            handle = _time.GetCurrentProcess()
            process_times = _time.GetProcessTimes(handle)
            return (process_times['UserTime'] + process_times['KernelTime']) * 1e-7
    else:
        try:
            import resource
        except ImportError:
            has_resource = False
        else:
            has_resource = True

        def process_time():
            if process_time.clock_id is not None:
                try:
                    return time.clock_gettime(process_time.clock_id)
                except OSError:
                    process_time.clock_id = None
            if process_time.use_getrusage:
                try:
                    usage = resource.getrusage(resource.RUSAGE_SELF)
                    return usage[0] + usage[1]
                except OSError:
                    process_time.use_getrusage = False
            if process_time.use_times:
                try:
                    times = _time.times()
                    cpu_time = times.tms_utime + times.tms_stime
                    return cpu_time / process_time.ticks_per_seconds
                except OSError:
                    process_time.use_times = False
            return _time.clock()
        if (hasattr(time, 'clock_gettime')
            and hasattr(time, 'CLOCK_PROF')):
            process_time.clock_id = time.CLOCK_PROF
        elif (hasattr(time, 'clock_gettime')
              and hasattr(time, 'CLOCK_PROCESS_CPUTIME_ID')):
            process_time.clock_id = time.CLOCK_PROCESS_CPUTIME_ID
        else:
            process_time.clock_id = None
        process_time.use_getrusage = has_resource
        process_time.use_times = hasattr(_time, 'times')
        if process_time.use_times:
            # sysconf("SC_CLK_TCK"), or the HZ constant, or 60
            process_time.ticks_per_seconds = _time.ticks_per_seconds

Existing Functions

time.time()

The system time, which is usually the civil time. It is system-wide by definition. It can be set manually by the system administrator or automatically by an NTP daemon. It is available on all platforms and cannot fail.

Pseudo-code[3]:

    if os.name == "nt":
        def time():
            return _time.GetSystemTimeAsFileTime()
    else:
        def time():
            if hasattr(time, "clock_gettime"):
                try:
                    return time.clock_gettime(time.CLOCK_REALTIME)
                except OSError:
                    # CLOCK_REALTIME is not supported (unlikely)
                    pass
            if hasattr(_time, "gettimeofday"):
                try:
                    return _time.gettimeofday()
                except OSError:
                    # gettimeofday() should not fail
                    pass
            if hasattr(_time, "ftime"):
                return _time.ftime()
            else:
                return _time.time()
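Taken together, the new clocks divide the measurement space. The contrast between the wall-clock perf_counter() and the CPU-time process_time() is easiest to see around a sleep; a small demonstration:

    import time

    t0_wall = time.perf_counter()
    t0_cpu = time.process_time()

    sum(i * i for i in range(10**6))   # burns CPU: advances both clocks
    time.sleep(0.5)                    # sleeps: advances only perf_counter()

    print("wall:", time.perf_counter() - t0_wall)   # ~0.5 s + compute time
    print("cpu: ", time.process_time() - t0_cpu)    # compute time only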
time.sleep()

Suspend execution for the given number of seconds. The actual suspension time may be less than that requested because any caught signal will terminate the time.sleep() following execution of that signal's catching routine. Also, the suspension time may be longer than requested by an arbitrary amount because of the scheduling of other activity in the system.

Pseudo-code[4]:

    try:
        import select
    except ImportError:
        has_select = False
    else:
        has_select = hasattr(select, "select")

    if has_select:
        def sleep(seconds):
            return select.select([], [], [], seconds)
    elif hasattr(_time, "delay"):
        def sleep(seconds):
            milliseconds = int(seconds * 1000)
            _time.delay(milliseconds)
    elif os.name == "nt":
        def sleep(seconds):
            milliseconds = int(seconds * 1000)
            win32api.ResetEvent(sleep.sigint_event)
            win32api.WaitForSingleObject(sleep.sigint_event, milliseconds)
        # SetEvent(sleep.sigint_event) will be called by the signal handler of SIGINT
        sleep.sigint_event = win32api.CreateEvent(NULL, TRUE, FALSE, FALSE)
    elif os.name == "os2":
        def sleep(seconds):
            milliseconds = int(seconds * 1000)
            DosSleep(milliseconds)
    else:
        def sleep(seconds):
            seconds = int(seconds)
            _time.sleep(seconds)

Deprecated Function

time.clock()

On Unix, return the current processor time as a floating point number expressed in seconds. It is process-wide by definition. The resolution, and in fact the very definition of the meaning of "processor time", depends on that of the C function of the same name, but in any case, this is the function to use for benchmarking Python or timing algorithms.

On Windows, this function returns wall-clock seconds elapsed since the first call to this function, as a floating point number, based on the Win32 function QueryPerformanceCounter(). The resolution is typically better than one microsecond. It is system-wide.

Pseudo-code[5]:

    if os.name == 'nt':
        def clock():
            try:
                return _win_perf_counter()
            except OSError:
                # QueryPerformanceFrequency() fails if the installed
                # hardware does not support a high-resolution performance
                # counter
                pass
            return _time.clock()
    else:
        clock = _time.clock

Alternatives: API design

Other names for time.monotonic()

- time.counter()
- time.metronomic()
- time.seconds()
- time.steady(): "steady" is ambiguous: it means different things to different people. For example, on Linux, CLOCK_MONOTONIC is adjusted. If we use the real time as the reference clock, we may say that CLOCK_MONOTONIC is steady. But CLOCK_MONOTONIC gets suspended on system suspend, whereas real time includes any time spent in suspend.
- time.timeout_clock()
- time.wallclock(): time.monotonic() is not the system time aka the "wall clock", but a monotonic clock with an unspecified starting point.

The name "time.try_monotonic()" was also proposed for an older version of time.monotonic() which would fall back to the system time when no monotonic clock was available.

Other names for time.perf_counter()

- time.high_precision()
- time.highres()
- time.hires()
- time.performance_counter()
- time.timer()

Only expose operating system clocks

To avoid having to define high-level clocks, which is a difficult task, a simpler approach is to only expose operating system clocks. time.clock_gettime() and related clock identifiers were already added to Python 3.3, for example.

time.monotonic(): Fallback to system time

If no monotonic clock is available, time.monotonic() falls back to the system time. Issues:

- It is hard to define such a function correctly in the documentation: is it monotonic? Is it steady? Is it adjusted?
- Some users want to decide what to do when no monotonic clock is available: use another clock, display an error, or do something else.

Different APIs were proposed to define such a function.
One function with a flag: time.monotonic(fallback=True)

- time.monotonic(fallback=True) falls back to the system time if no monotonic clock is available or if the monotonic clock failed.
- time.monotonic(fallback=False) raises OSError if the monotonic clock fails and NotImplementedError if the system does not provide a monotonic clock.

A keyword argument that gets passed as a constant in the caller is usually poor API. Raising NotImplementedError for a function is something uncommon in Python and should be avoided.

One time.monotonic() function, no flag

time.monotonic() returns (time: float, is_monotonic: bool). An alternative is to use a function attribute: time.monotonic.is_monotonic. The attribute value would be None before the first call to time.monotonic().

Choosing the clock from a list of constraints

The PEP as proposed offers a few new clocks, but their guarantees are deliberately loose in order to offer useful clocks on different platforms. This inherently embeds policy in the calls, and the caller must thus choose a policy.

The "choose a clock" approach suggests an additional API to let callers implement their own policy if necessary, by making most platform clocks available and letting the caller pick amongst them. The PEP's suggested clocks are still expected to be available for the common simple use cases.

To do this, two facilities are needed: an enumeration of clocks, and metadata on the clocks to enable the user to evaluate their suitability.

The primary interface is a function that makes simple choices easy: the caller can use time.get_clock(*flags) with some combination of flags. This includes at least:

- time.MONOTONIC: clock cannot go backward
- time.STEADY: clock rate is steady
- time.ADJUSTED: clock may be adjusted, for example by NTP
- time.HIGHRES: clock with the highest resolution

It returns a clock object with a .now() method returning the current time. The clock object is annotated with metadata describing the clock feature set; its .flags field will contain at least all the requested flags. time.get_clock() returns None if no matching clock is found, so calls can be chained using the or operator. Example of a simple policy decision:

    T = get_clock(MONOTONIC) or get_clock(STEADY) or get_clock()
    t = T.now()

The available clocks always at least include a wrapper for time.time(), so a final call with no flags can always be used to obtain a working clock. Examples of flags of system clocks:

- QueryPerformanceCounter: MONOTONIC | HIGHRES
- GetTickCount: MONOTONIC | STEADY
- CLOCK_MONOTONIC: MONOTONIC | STEADY (or only MONOTONIC on Linux)
- CLOCK_MONOTONIC_RAW: MONOTONIC | STEADY
- gettimeofday(): (no flag)

The clock objects contain other metadata, including the clock flags with additional feature flags above those listed, the name of the underlying OS facility, and clock precisions.

time.get_clock() still chooses a single clock; an enumeration facility is also required. The most obvious method is to offer time.get_clocks() with the same signature as time.get_clock(), but returning a sequence of all clocks matching the requested flags. Requesting no flags would thus enumerate all available clocks, allowing the caller to make an arbitrary choice amongst them based on their metadata. Example partial implementation: clockutils.py.
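This "choose a clock" API was never adopted, but a minimal sketch helps make the flags-and-metadata idea concrete. The sketch below assumes plain module-level names (get_clock, MONOTONIC, etc.) and illustrative flag values rather than additions to the time module, and registers only two clocks:

    import time

    # Flag bits (illustrative values; the PEP only names the flags).
    MONOTONIC, STEADY, ADJUSTED, HIGHRES = 1, 2, 4, 8

    class _Clock:
        def __init__(self, flags, now, implementation):
            self.flags = flags
            self.now = now                      # .now() returns current time
            self.implementation = implementation

    _CLOCKS = [
        _Clock(MONOTONIC | HIGHRES, time.monotonic, "time.monotonic"),
        _Clock(0, time.time, "time.time"),      # no-flag fallback wrapper
    ]

    def get_clock(*flags):
        """Return a clock whose flags include all requested flags, else None."""
        wanted = 0
        for flag in flags:
            wanted |= flag
        for clock in _CLOCKS:
            if clock.flags & wanted == wanted:
                return clock
        return None

    # Policy chaining, as in the PEP's example:
    T = get_clock(MONOTONIC) or get_clock(STEADY) or get_clock()
    print(T.implementation, T.now())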
Working around operating system bugs?

Should Python ensure that a monotonic clock is truly monotonic by computing the maximum of the clock value and the previous value? Since it is relatively straightforward to cache the last value returned using a static variable, it might be interesting to use this to make sure that the values returned are indeed monotonic:

- Virtual machines provide less reliable clocks.
- QueryPerformanceCounter() has known bugs (only one is not fixed yet).

Python may only work around a specific known operating system bug: KB274323 contains a code example to work around the bug (use GetTickCount() to detect QueryPerformanceCounter() leaps).

Issues with "correcting" non-monotonicities:

- if the clock is accidentally set forward by an hour and then back again, you wouldn't have a useful clock for an hour
- the cache is not shared between processes, so different processes wouldn't see the same clock value

Glossary

Accuracy
    The amount of deviation of measurements by a given instrument from true values. See also Accuracy and precision. Inaccuracy in clocks may be caused by lack of precision, drift, or an incorrect initial setting of the clock (e.g., timing of threads is inherently inaccurate because perfect synchronization in resetting counters is quite difficult).

Adjusted
    Resetting a clock to the correct time. This may be done either with a step or by slewing.

Civil Time
    Time of day; external to the system. 10:45:13am is a civil time; 45 seconds is not. Provided by the existing functions time.localtime() and time.gmtime(). Not changed by this PEP.

Clock
    An instrument for measuring time. Different clocks have different characteristics; for example, a clock with nanosecond precision may start to drift after a few minutes, while a less precise clock remains accurate for days. This PEP is primarily concerned with clocks which use a unit of seconds.

Counter
    A clock which increments each time a certain event occurs. A counter is strictly monotonic, but not a monotonic clock. It can be used to generate a unique (and ordered) timestamp, but these timestamps cannot be mapped to civil time; tick creation may well be bursty, with several advances in the same millisecond followed by several days without any advance.

CPU Time
    A measure of how much CPU effort has been spent on a certain task. CPU seconds are often normalized (so that a variable number can occur in the same actual second). CPU seconds can be important when profiling, but they do not map directly to user response time, nor are they directly comparable to (real time) seconds.

Drift
    The accumulated error against "true" time, as defined externally to the system. Drift may be due to imprecision, or to a difference between the average rate at which clock time advances and that of real time.

Epoch
    The reference point of a clock. For clocks providing civil time, this is often midnight as the day (and year) rolled over to January 1, 1970. For a monotonic clock, the epoch may be undefined (represented as None).

Latency
    Delay. By the time a clock call returns, the real time has advanced, possibly by more than the precision of the clock.

Monotonic
    The characteristics expected of a monotonic clock in practice. Moving in at most one direction; for clocks, that direction is forward. The clock should also be steady, and should be convertible to a unit of seconds. The tradeoffs often include lack of a defined epoch or mapping to civil time.

Precision
    The amount of deviation among measurements of the same physical value by a single instrument.
    Imprecision in clocks may be caused by a fluctuation of the rate at which clock time advances relative to real time, including clock adjustment by slewing.

Process Time
    Time elapsed since the process began. It is typically measured in CPU time rather than real time, and typically does not advance while the process is suspended.

Real Time
    Time in the real world. This differs from civil time in that it is not adjusted, but they should otherwise advance in lockstep. It is not related to the "real time" of "Real Time [Operating] Systems". It is sometimes called "wall clock time" to avoid that ambiguity; unfortunately, that introduces different ambiguities.

Resolution
    The smallest difference between two physical values that results in a different measurement by a given instrument.

Slew
    A slight change to a clock's speed, usually intended to correct drift with respect to an external authority.

Stability
    Persistence of accuracy. A measure of expected drift.

Steady
    A clock with high stability and relatively high accuracy and precision. In practice, it is often used to indicate a monotonic clock, but places greater emphasis on the consistency of the duration between subsequent ticks.

Step
    An instantaneous change in the represented time. Instead of speeding or slowing the clock (slew), a single offset is permanently added.

System Time
    Time as represented by the operating system.

Thread Time
    Time elapsed since the thread began. It is typically measured in CPU time rather than real time, and typically does not advance while the thread is idle.

Wallclock
    What the clock on the wall says. This is typically used as a synonym for real time; unfortunately, wall time is itself ambiguous.

Hardware clocks

List of hardware clocks:

- HPET: A High Precision Event Timer (HPET) chip consists of a 64-bit up-counter (main counter) counting at least at 10 MHz and a set of up to 256 comparators (at least 3). Each HPET can have up to 32 timers. HPET can cause around 3 seconds of drift per day.
- TSC (Time Stamp Counter): Historically, the TSC increased with every internal processor clock cycle, but now the rate is usually constant (even if the processor changes frequency) and usually equals the maximum processor frequency. Multiple cores have different TSC values. Hibernation of the system will reset the TSC value, and CPU frequency scaling for power saving affected the TSC rate on older processors. The RDTSC instruction can be used to read this counter.
- ACPI Power Management Timer: ACPI 24-bit timer with a frequency of 3.579545 MHz (3,579,545 Hz).
- Cyclone: The Cyclone timer uses a 32-bit counter on IBM Extended X-Architecture (EXA) chipsets, which include computers that use the IBM "Summit" series chipsets (e.g. x440). This is available in IA32 and IA64 architectures.
- PIT (programmable interrupt timer): Intel 8253/8254 chipsets with a configurable frequency in the range 18.2 Hz - 1.2 MHz. It uses a 16-bit counter.
- RTC (Real-time clock): Most RTCs use a crystal oscillator with a frequency of 32,768 Hz.

Linux clocksource

There were 4 implementations of the time in the Linux kernel: UTIME (1996), timer wheel (1997), HRT (2001) and hrtimers (2007). The latter is the result of the "high-res-timers" project started by George Anzinger in 2001, with contributions by Thomas Gleixner and Douglas Niehaus. The hrtimers implementation was merged into Linux 2.6.21, released in 2007.

hrtimers supports various clock sources. It sets a priority for each source to decide which one will be used.
Linux supports the following clock sources:

- tsc
- hpet
- pit
- pmtmr: ACPI Power Management Timer
- cyclone

High-resolution timers are not supported on all hardware architectures. They are at least provided on x86/x86_64, ARM and PowerPC.

clock_getres() returns 1 nanosecond for CLOCK_REALTIME and CLOCK_MONOTONIC regardless of the underlying clock source. Read Re: clock_getres() and real resolution from Thomas Gleixner (9 Feb 2012) for an explanation.

The /sys/devices/system/clocksource/clocksource0 directory contains two useful files:

- available_clocksource: list of available clock sources
- current_clocksource: clock source currently used. It is possible to change the current clocksource by writing the name of a clocksource into this file.

/proc/timer_list contains the list of all hardware timers. Read also the time(7) manual page: "overview of time and timers".

FreeBSD timecounter

kern.timecounter.choice lists available hardware clocks with their priority. The sysctl program can be used to change the timecounter. Example:

    # dmesg | grep Timecounter
    Timecounter "i8254" frequency 1193182 Hz quality 0
    Timecounter "ACPI-safe" frequency 3579545 Hz quality 850
    Timecounter "HPET" frequency 100000000 Hz quality 900
    Timecounter "TSC" frequency 3411154800 Hz quality 800
    Timecounters tick every 10.000 msec
    # sysctl kern.timecounter.choice
    kern.timecounter.choice: TSC(800) HPET(900) ACPI-safe(850) i8254(0) dummy(-1000000)
    # sysctl kern.timecounter.hardware="ACPI-fast"
    kern.timecounter.hardware: HPET -> ACPI-fast

Available clocks:

- "TSC": Time Stamp Counter of the processor
- "HPET": High Precision Event Timer
- "ACPI-fast": ACPI Power Management timer (fast mode)
- "ACPI-safe": ACPI Power Management timer (safe mode)
- "i8254": PIT with Intel 8254 chipset

The commit 222222 (May 2011) decreased the ACPI-fast timecounter quality to 900 and increased the HPET timecounter quality to 950: "HPET on modern platforms usually have better resolution and lower latency than ACPI timer". Read Timecounters: Efficient and precise timekeeping in SMP kernels by Poul-Henning Kamp (2002) for the FreeBSD Project.

Performance

Reading a hardware clock has a cost. The following table compares the performance of different hardware clocks on Linux 3.3 with an Intel Core i7-2600 at 3.40 GHz (8 cores). The bench_time.c program was used to fill these tables.
+--------------------------+---------+----------+---------+
| Function                 | TSC     | ACPI PM  | HPET    |
+==========================+=========+==========+=========+
| time()                   | 2 ns    | 2 ns     | 2 ns    |
+--------------------------+---------+----------+---------+
| CLOCK_REALTIME_COARSE    | 10 ns   | 10 ns    | 10 ns   |
+--------------------------+---------+----------+---------+
| CLOCK_MONOTONIC_COARSE   | 12 ns   | 13 ns    | 12 ns   |
+--------------------------+---------+----------+---------+
| CLOCK_THREAD_CPUTIME_ID  | 134 ns  | 135 ns   | 135 ns  |
+--------------------------+---------+----------+---------+
| CLOCK_PROCESS_CPUTIME_ID | 127 ns  | 129 ns   | 129 ns  |
+--------------------------+---------+----------+---------+
| clock()                  | 146 ns  | 146 ns   | 143 ns  |
+--------------------------+---------+----------+---------+
| gettimeofday()           | 23 ns   | 726 ns   | 637 ns  |
+--------------------------+---------+----------+---------+
| CLOCK_MONOTONIC_RAW      | 31 ns   | 716 ns   | 607 ns  |
+--------------------------+---------+----------+---------+
| CLOCK_REALTIME           | 27 ns   | 707 ns   | 629 ns  |
+--------------------------+---------+----------+---------+
| CLOCK_MONOTONIC          | 27 ns   | 723 ns   | 635 ns  |
+--------------------------+---------+----------+---------+

FreeBSD 8.0 in kvm with hardware virtualization:

+-------------------------+--------+-----------+----------+----------+
| Function                | TSC    | ACPI-Safe | HPET     | i8254    |
+=========================+========+===========+==========+==========+
| time()                  | 191 ns | 188 ns    | 189 ns   | 188 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_SECOND            | 187 ns | 184 ns    | 187 ns   | 183 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_REALTIME_FAST     | 189 ns | 180 ns    | 187 ns   | 190 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_UPTIME_FAST       | 191 ns | 185 ns    | 186 ns   | 196 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_MONOTONIC_FAST    | 188 ns | 187 ns    | 188 ns   | 189 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_THREAD_CPUTIME_ID | 208 ns | 206 ns    | 207 ns   | 220 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_VIRTUAL           | 280 ns | 279 ns    | 283 ns   | 296 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_PROF              | 289 ns | 280 ns    | 282 ns   | 286 ns   |
+-------------------------+--------+-----------+----------+----------+
| clock()                 | 342 ns | 340 ns    | 337 ns   | 344 ns   |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_UPTIME_PRECISE    | 197 ns | 10380 ns  | 4402 ns  | 4097 ns  |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_REALTIME          | 196 ns | 10376 ns  | 4337 ns  | 4054 ns  |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_MONOTONIC_PRECISE | 198 ns | 10493 ns  | 4413 ns  | 3958 ns  |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_UPTIME            | 197 ns | 10523 ns  | 4458 ns  | 4058 ns  |
+-------------------------+--------+-----------+----------+----------+
| gettimeofday()          | 202 ns | 10524 ns  | 4186 ns  | 3962 ns  |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_REALTIME_PRECISE  | 197 ns | 10599 ns  | 4394 ns  | 4060 ns  |
+-------------------------+--------+-----------+----------+----------+
| CLOCK_MONOTONIC         | 201 ns | 10766 ns  | 4498 ns  | 3943 ns  |
+-------------------------+--------+-----------+----------+----------+

Each function was called 100,000 times and CLOCK_MONOTONIC was used to get the time before and after. The benchmark was run 5 times, keeping the minimum time.

NTP adjustment

NTP has different methods to adjust a clock:

- "slewing": change the clock frequency to be slightly faster or slower (which is done with adjtime()). Since the slew rate is limited to 0.5 millisecond per second, each second of adjustment requires an amortization interval of 2000 seconds. Thus, an adjustment of many seconds can take hours or days to amortize.
- "stepping": jump by a large amount in a single discrete step (which is done with settimeofday())

By default, the time is slewed if the offset is less than 128 ms, but stepped otherwise.

Slewing is generally desirable (i.e. we should use CLOCK_MONOTONIC, not CLOCK_MONOTONIC_RAW) if one wishes to measure "real" time (and not a time-like object like CPU cycles). This is because the clock on the other end of the NTP connection from you is probably better at keeping time: hopefully that thirty-five thousand dollars of Cesium timekeeping goodness is doing something better than your PC's $3 quartz crystal, after all.

Get more detail in the documentation of the NTP daemon.

Operating system time functions

Monotonic Clocks

+---------------------------+--------------+-----------------+---------------+-----------------+
| Name                      | C Resolution | Adjusted        | Include Sleep | Include Suspend |
+===========================+==============+=================+===============+=================+
| gethrtime()               | 1 ns         | No              | Yes           | Yes             |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_HIGHRES             | 1 ns         | No              | Yes           | Yes             |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_MONOTONIC           | 1 ns         | Slewed on Linux | Yes           | No              |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_MONOTONIC_COARSE    | 1 ns         | Slewed on Linux | Yes           | No              |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_MONOTONIC_RAW       | 1 ns         | No              | Yes           | No              |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_BOOTTIME            | 1 ns         | ?               | Yes           | Yes             |
+---------------------------+--------------+-----------------+---------------+-----------------+
| CLOCK_UPTIME              | 1 ns         | No              | Yes           | ?               |
+---------------------------+--------------+-----------------+---------------+-----------------+
| mach_absolute_time()      | 1 ns         | No              | Yes           | No              |
+---------------------------+--------------+-----------------+---------------+-----------------+
| QueryPerformanceCounter() | -            | No              | Yes           | ?               |
+---------------------------+--------------+-----------------+---------------+-----------------+
| GetTickCount[64]()        | 1 ms         | No              | Yes           | Yes             |
+---------------------------+--------------+-----------------+---------------+-----------------+
| timeGetTime()             | 1 ms         | No              | Yes           | ?               |
+---------------------------+--------------+-----------------+---------------+-----------------+

The "C Resolution" column is the resolution of the underlying C structure.
Examples of clock resolution on x86_64:

+-------------------------+------------------+---------------+-------------------+
| Name                    | Operating system | OS Resolution | Python Resolution |
+=========================+==================+===============+===================+
| QueryPerformanceCounter | Windows Seven    | 10 ns         | 10 ns             |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_HIGHRES           | SunOS 5.11       | 2 ns          | 265 ns            |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC         | Linux 3.0        | 1 ns          | 322 ns            |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC_RAW     | Linux 3.3        | 1 ns          | 628 ns            |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_BOOTTIME          | Linux 3.3        | 1 ns          | 628 ns            |
+-------------------------+------------------+---------------+-------------------+
| mach_absolute_time()    | Mac OS 10.6      | 1 ns          | 3 µs              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC         | FreeBSD 8.2      | 11 ns         | 5 µs              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC         | OpenBSD 5.0      | 10 ms         | 5 µs              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_UPTIME            | FreeBSD 8.2      | 11 ns         | 6 µs              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC_COARSE  | Linux 3.3        | 1 ms          | 1 ms              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_MONOTONIC_COARSE  | Linux 3.0        | 4 ms          | 4 ms              |
+-------------------------+------------------+---------------+-------------------+
| GetTickCount64()        | Windows Seven    | 16 ms         | 15 ms             |
+-------------------------+------------------+---------------+-------------------+

The "OS Resolution" is the resolution announced by the operating system. The "Python Resolution" is the smallest difference between two calls to the time function computed in Python using the clock_resolution.py program.

mach_absolute_time

Mac OS X provides a monotonic clock: mach_absolute_time(). It is based on absolute elapsed time since system boot. It is not adjusted and cannot be set. mach_timebase_info() gives a fraction to convert the clock value to a number of nanoseconds. See also the Technical Q&A QA1398.

mach_absolute_time() stops during a sleep on a PowerPC CPU, but not on an Intel CPU: Different behaviour of mach_absolute_time() on i386/ppc.

CLOCK_MONOTONIC, CLOCK_MONOTONIC_RAW, CLOCK_BOOTTIME

CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW represent monotonic time since some unspecified starting point. They cannot be set. The resolution can be read using clock_getres().

Documentation: refer to the manual page of your operating system. Examples:

- FreeBSD clock_gettime() manual page
- Linux clock_gettime() manual page

CLOCK_MONOTONIC is available at least on the following operating systems:

- DragonFly BSD, FreeBSD >= 5.0, OpenBSD, NetBSD
- Linux
- Solaris

The following operating systems don't support CLOCK_MONOTONIC:

- GNU/Hurd (see open issues/clock_gettime)
- Mac OS X
- Windows

On Linux, NTP may adjust the CLOCK_MONOTONIC rate (slewed), but it cannot jump backward.

CLOCK_MONOTONIC_RAW is specific to Linux. It is similar to CLOCK_MONOTONIC, but provides access to a raw hardware-based time that is not subject to NTP adjustments. CLOCK_MONOTONIC_RAW requires Linux 2.6.28 or later.
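Python 3.3 exposes these POSIX clocks directly; on Linux, the adjusted and raw monotonic clocks can be compared like this (Unix-only sketch; CLOCK_MONOTONIC_RAW is only defined on Linux):

    import time

    # Unix-only: compare the NTP-slewed and raw monotonic clocks.
    for clock_id in (time.CLOCK_MONOTONIC, time.CLOCK_MONOTONIC_RAW):
        print("value:", time.clock_gettime(clock_id),
              "resolution:", time.clock_getres(clock_id))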
Linux 2.6.39 and glibc 2.14 introduced a new clock: CLOCK_BOOTTIME. CLOCK_BOOTTIME is identical to CLOCK_MONOTONIC, except that it also includes any time spent in suspend. Read also Waking systems from suspend (March 2011). CLOCK_MONOTONIC stops while the machine is suspended.

Linux also provides CLOCK_MONOTONIC_COARSE since Linux 2.6.32. It is similar to CLOCK_MONOTONIC, less precise but faster.

clock_gettime() fails if the system does not support the specified clock, even if the standard C library supports it. For example, CLOCK_MONOTONIC_RAW requires a kernel version 2.6.28 or later.

Windows: QueryPerformanceCounter

High-resolution performance counter. It is monotonic. The frequency of the counter can be read using QueryPerformanceFrequency(). The resolution is 1 / QueryPerformanceFrequency().

It has a much higher resolution, but lower long-term precision, than the GetTickCount() and timeGetTime() clocks. For example, it will drift compared to the low-precision clocks.

Documentation:

- MSDN: QueryPerformanceCounter() documentation
- MSDN: QueryPerformanceFrequency() documentation

Hardware clocks used by QueryPerformanceCounter:

- Windows XP: RDTSC instruction of Intel processors; the clock frequency is the frequency of the processor (between 200 MHz and 3 GHz, usually greater than 1 GHz nowadays).
- Windows 2000: ACPI power management timer, frequency = 3,579,545 Hz. It can be forced through the "/usepmtimer" flag in boot.ini.

QueryPerformanceFrequency() should only be called once: the frequency will not change while the system is running. It fails if the installed hardware does not support a high-resolution performance counter.

QueryPerformanceCounter() cannot be adjusted: SetSystemTimeAdjustment() only adjusts the system time.

Bugs:

- The performance counter value may unexpectedly leap forward because of a hardware bug, see KB274323.
- On VirtualBox, QueryPerformanceCounter() does not increment the high part every time the low part overflows, see Monotonic timers (2009).
- VirtualBox had a bug in its HPET virtualized device: QueryPerformanceCounter() did jump forward by approx. 42 seconds (issue #8707).
- Windows XP had a bug (see KB896256): on a multiprocessor computer, QueryPerformanceCounter() returned a different value for each processor. The bug was fixed in Windows XP SP2.
- Issues with processors with variable frequency: the frequency is changed depending on the workload, to reduce power consumption.
- Chromium doesn't use QueryPerformanceCounter() on Athlon X2 CPUs (model 15) because "QueryPerformanceCounter is unreliable" (see base/time_win.cc in the Chromium source code).

Windows: GetTickCount(), GetTickCount64()

GetTickCount() and GetTickCount64() are monotonic, cannot fail and are not adjusted by SetSystemTimeAdjustment(). MSDN documentation: GetTickCount(), GetTickCount64(). The resolution can be read using GetSystemTimeAdjustment().

The elapsed time retrieved by GetTickCount() or GetTickCount64() includes time the system spends in sleep or hibernation.

GetTickCount64() was added to Windows Vista and Windows Server 2008.

It is possible to improve the precision using the undocumented NtSetTimerResolution() function. There are applications using this undocumented function, for example: Timer Resolution.

WaitForSingleObject() uses the same timer as GetTickCount(), with the same precision.

Windows: timeGetTime

The timeGetTime function retrieves the system time, in milliseconds. The system time is the time elapsed since Windows was started. Read the timeGetTime() documentation.
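For illustration, the tick counter described above can be reached from Python without this PEP via ctypes; a minimal, Windows-only sketch (not the PEP's implementation):

    import ctypes

    # Windows-only: read the millisecond tick counter directly.
    kernel32 = ctypes.windll.kernel32
    kernel32.GetTickCount64.restype = ctypes.c_uint64  # avoid 32-bit truncation

    uptime_ms = kernel32.GetTickCount64()
    print("system uptime: %.1f s" % (uptime_ms / 1000.0))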
The return type of timeGetTime() is a 32-bit unsigned integer. Like GetTickCount(), timeGetTime() rolls over after 2^32 milliseconds (49.7 days). The elapsed time retrieved by timeGetTime() includes time the system spends in sleep.

The default precision of the timeGetTime function can be five milliseconds or more, depending on the machine. timeBeginPeriod() can be used to increase the precision of timeGetTime() up to 1 millisecond, but it negatively affects power consumption. Calling timeBeginPeriod() also affects the granularity of some other timing calls, such as CreateWaitableTimer(), WaitForSingleObject() and Sleep().

Note: timeGetTime() and timeBeginPeriod() are part of the Windows multimedia library, and so require linking the program against winmm or dynamically loading the library.

Solaris: CLOCK_HIGHRES

The Solaris OS has a CLOCK_HIGHRES timer that attempts to use an optimal hardware source, and may give close to nanosecond resolution. CLOCK_HIGHRES is the nonadjustable, high-resolution clock. For timers created with a clockid_t value of CLOCK_HIGHRES, the system will attempt to use an optimal hardware source. The resolution of CLOCK_HIGHRES can be read using clock_getres().

Solaris: gethrtime

The gethrtime() function returns the current high-resolution real time. Time is expressed as nanoseconds since some arbitrary time in the past; it is not correlated in any way to the time of day, and thus is not subject to resetting or drifting by way of adjtime() or settimeofday(). The hires timer is ideally suited to performance measurement tasks, where cheap, accurate interval timing is required.

The linearity of gethrtime() is not preserved across a suspend-resume cycle (Bug 4272663). Read the gethrtime() manual page of Solaris 11.

On Solaris, gethrtime() is the same as clock_gettime(CLOCK_MONOTONIC).

System Time

+-------------------------+--------------+---------------+-----------------+
| Name                    | C Resolution | Include Sleep | Include Suspend |
+=========================+==============+===============+=================+
| CLOCK_REALTIME          | 1 ns         | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+
| CLOCK_REALTIME_COARSE   | 1 ns         | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+
| GetSystemTimeAsFileTime | 100 ns       | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+
| gettimeofday()          | 1 µs         | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+
| ftime()                 | 1 ms         | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+
| time()                  | 1 sec        | Yes           | Yes             |
+-------------------------+--------------+---------------+-----------------+

The "C Resolution" column is the resolution of the underlying C structure.
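Throughout these tables, the "Python Resolution" is measured as the smallest observable difference between two successive calls. A rough way to estimate it for any Python clock (a sketch in the spirit of, but not identical to, the clock_resolution.py program cited above):

    import time

    def estimate_resolution(clock, samples=10000):
        """Smallest nonzero difference between two successive clock() calls."""
        best = None
        for _ in range(samples):
            t1 = clock()
            t2 = clock()
            dt = t2 - t1
            if dt > 0 and (best is None or dt < best):
                best = dt
        return best

    print("time.time()     :", estimate_resolution(time.time))
    print("time.monotonic():", estimate_resolution(time.monotonic))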
Examples of clock resolution on x86_64:

+---------------------------+------------------+---------------+-------------------+
| Name                      | Operating system | OS Resolution | Python Resolution |
+===========================+==================+===============+===================+
| CLOCK_REALTIME            | SunOS 5.11       | 10 ms         | 238 ns            |
+---------------------------+------------------+---------------+-------------------+
| CLOCK_REALTIME            | Linux 3.0        | 1 ns          | 238 ns            |
+---------------------------+------------------+---------------+-------------------+
| gettimeofday()            | Mac OS 10.6      | 1 µs          | 4 µs              |
+---------------------------+------------------+---------------+-------------------+
| CLOCK_REALTIME            | FreeBSD 8.2      | 11 ns         | 6 µs              |
+---------------------------+------------------+---------------+-------------------+
| CLOCK_REALTIME            | OpenBSD 5.0      | 10 ms         | 5 µs              |
+---------------------------+------------------+---------------+-------------------+
| CLOCK_REALTIME_COARSE     | Linux 3.3        | 1 ms          | 1 ms              |
+---------------------------+------------------+---------------+-------------------+
| CLOCK_REALTIME_COARSE     | Linux 3.0        | 4 ms          | 4 ms              |
+---------------------------+------------------+---------------+-------------------+
| GetSystemTimeAsFileTime() | Windows Seven    | 16 ms         | 1 ms              |
+---------------------------+------------------+---------------+-------------------+
| ftime()                   | Windows Seven    | -             | 1 ms              |
+---------------------------+------------------+---------------+-------------------+

The "OS Resolution" is the resolution announced by the operating system. The "Python Resolution" is the smallest difference between two calls to the time function computed in Python using the clock_resolution.py program.

Windows: GetSystemTimeAsFileTime

The system time can be read using GetSystemTimeAsFileTime(), ftime() and time(). The resolution of the system time can be read using GetSystemTimeAdjustment(). Read the GetSystemTimeAsFileTime() documentation.

The system time can be set using SetSystemTime().

System time on UNIX

gettimeofday(), ftime(), time() and clock_gettime(CLOCK_REALTIME) return the system time. The resolution of CLOCK_REALTIME can be read using clock_getres(). The system time can be set using settimeofday() or clock_settime(CLOCK_REALTIME).

Linux also provides CLOCK_REALTIME_COARSE since Linux 2.6.32. It is similar to CLOCK_REALTIME, less precise but faster.

Alexander Shishkin proposed an API for Linux to be notified when the system clock is changed: timerfd: add TFD_NOTIFY_CLOCK_SET to watch for clock changes (4th version of the API, March 2011). The API is not accepted yet, but CLOCK_BOOTTIME provides a similar feature.

Process Time

The process time cannot be set. It is not monotonic: the clocks stop while the process is idle.
+--------------------------+--------------+-----------------+-----------------+
| Name                     | C Resolution | Include Sleep   | Include Suspend |
+==========================+==============+=================+=================+
| GetProcessTimes()        | 100 ns       | No              | No              |
+--------------------------+--------------+-----------------+-----------------+
| CLOCK_PROCESS_CPUTIME_ID | 1 ns         | No              | No              |
+--------------------------+--------------+-----------------+-----------------+
| getrusage(RUSAGE_SELF)   | 1 µs         | No              | No              |
+--------------------------+--------------+-----------------+-----------------+
| times()                  | -            | No              | No              |
+--------------------------+--------------+-----------------+-----------------+
| clock()                  | -            | Yes on Windows, | No              |
|                          |              | No otherwise    |                 |
+--------------------------+--------------+-----------------+-----------------+

The "C Resolution" column is the resolution of the underlying C structure.

Examples of clock resolution on x86_64:

+--------------------------+------------------+---------------+-------------------+
| Name                     | Operating system | OS Resolution | Python Resolution |
+==========================+==================+===============+===================+
| CLOCK_PROCESS_CPUTIME_ID | Linux 3.3        | 1 ns          | 1 ns              |
+--------------------------+------------------+---------------+-------------------+
| CLOCK_PROF               | FreeBSD 8.2      | 10 ms         | 1 µs              |
+--------------------------+------------------+---------------+-------------------+
| getrusage(RUSAGE_SELF)   | FreeBSD 8.2      | -             | 1 µs              |
+--------------------------+------------------+---------------+-------------------+
| getrusage(RUSAGE_SELF)   | SunOS 5.11       | -             | 1 µs              |
+--------------------------+------------------+---------------+-------------------+
| CLOCK_PROCESS_CPUTIME_ID | Linux 3.0        | 1 ns          | 1 µs              |
+--------------------------+------------------+---------------+-------------------+
| getrusage(RUSAGE_SELF)   | Mac OS 10.6      | -             | 5 µs              |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | Mac OS 10.6      | 1 µs          | 5 µs              |
+--------------------------+------------------+---------------+-------------------+
| CLOCK_PROF               | OpenBSD 5.0      | -             | 5 µs              |
+--------------------------+------------------+---------------+-------------------+
| getrusage(RUSAGE_SELF)   | Linux 3.0        | -             | 4 ms              |
+--------------------------+------------------+---------------+-------------------+
| getrusage(RUSAGE_SELF)   | OpenBSD 5.0      | -             | 8 ms              |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | FreeBSD 8.2      | 8 ms          | 8 ms              |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | Linux 3.0        | 1 µs          | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| times()                  | Linux 3.0        | 10 ms         | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | OpenBSD 5.0      | 10 ms         | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| times()                  | OpenBSD 5.0      | 10 ms         | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| times()                  | Mac OS 10.6      | 10 ms         | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | SunOS 5.11       | 1 µs          | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| times()                  | SunOS 5.11       | 1 µs          | 10 ms             |
+--------------------------+------------------+---------------+-------------------+
| GetProcessTimes()        | Windows Seven    | 16 ms         | 16 ms             |
+--------------------------+------------------+---------------+-------------------+
| clock()                  | Windows Seven    | 1 ms          | 1 ms              |
+--------------------------+------------------+---------------+-------------------+

The "OS Resolution" is the resolution announced by the operating system. The "Python Resolution" is the smallest difference between two calls to the time function computed in Python using the clock_resolution.py program.

Functions

- Windows: GetProcessTimes(). The resolution can be read using GetSystemTimeAdjustment().
- clock_gettime(CLOCK_PROCESS_CPUTIME_ID): high-resolution per-process timer from the CPU. The resolution can be read using clock_getres().
- clock(). The resolution is 1 / CLOCKS_PER_SEC.
  - Windows: the elapsed wall-clock time since the start of the process (elapsed time in seconds times CLOCKS_PER_SEC). Includes time elapsed during sleep. It can fail.
  - UNIX: returns an approximation of the processor time used by the program.
- getrusage(RUSAGE_SELF) returns a structure of resource usage of the current process. ru_utime is the user CPU time and ru_stime is the system CPU time.
- times(): structure of process times. The resolution is 1 / ticks_per_seconds, where ticks_per_seconds is sysconf(_SC_CLK_TCK) or the HZ constant.

Python source code includes a portable library to get the process time (CPU time): Tools/pybench/systimes.py.

See also the QueryProcessCycleTime() function (sum of the cycle time of all threads) and clock_getcpuclockid().

Thread Time

The thread time cannot be set. It is not monotonic: the clocks stop while the thread is idle.

+-------------------------+--------------+---------------+-----------------+
| Name                    | C Resolution | Include Sleep | Include Suspend |
+=========================+==============+===============+=================+
| CLOCK_THREAD_CPUTIME_ID | 1 ns         | Yes           | Epoch changes   |
+-------------------------+--------------+---------------+-----------------+
| GetThreadTimes()        | 100 ns       | No            | ?               |
+-------------------------+--------------+---------------+-----------------+

The "C Resolution" column is the resolution of the underlying C structure.

Examples of clock resolution on x86_64:

+-------------------------+------------------+---------------+-------------------+
| Name                    | Operating system | OS Resolution | Python Resolution |
+=========================+==================+===============+===================+
| CLOCK_THREAD_CPUTIME_ID | FreeBSD 8.2      | 1 µs          | 1 µs              |
+-------------------------+------------------+---------------+-------------------+
| CLOCK_THREAD_CPUTIME_ID | Linux 3.3        | 1 ns          | 649 ns            |
+-------------------------+------------------+---------------+-------------------+
| GetThreadTimes()        | Windows Seven    | 16 ms         | 16 ms             |
+-------------------------+------------------+---------------+-------------------+

The "OS Resolution" is the resolution announced by the operating system. The "Python Resolution" is the smallest difference between two calls to the time function computed in Python using the clock_resolution.py program.

Functions

- Windows: GetThreadTimes(). The resolution can be read using GetSystemTimeAdjustment().
- clock_gettime(CLOCK_THREAD_CPUTIME_ID): thread-specific CPU-time clock. It uses a number of CPU cycles, not a number of seconds. The resolution can be read using clock_getres().

See also the QueryThreadCycleTime() function (cycle time for the specified thread) and pthread_getcpuclockid().
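Although this PEP declined to add time.thread_time() (it was added later, in Python 3.7), the Linux route mentioned earlier is already usable from Python 3.3; a small Unix-only sketch:

    import time
    import threading

    def worker():
        # Per-thread CPU time: only this thread's computation counts.
        start = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)
        sum(i * i for i in range(10**6))
        end = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)
        print("thread CPU time: %.6f s" % (end - start))

    t = threading.Thread(target=worker)
    t.start()
    t.join()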
Windows: QueryUnbiasedInterruptTime

Gets the current unbiased interrupt time from the biased interrupt time and the current sleep bias amount. This time is not affected by power management sleep transitions. The elapsed time retrieved by the QueryUnbiasedInterruptTime function includes only time that the system spends in the working state. QueryUnbiasedInterruptTime() is not monotonic.

QueryUnbiasedInterruptTime() was introduced in Windows 7.

See also the QueryIdleProcessorCycleTime() function (cycle time for the idle thread of each processor).

Sleep

Suspend execution of the process for the given number of seconds. Sleep is not affected by system time updates. Sleep is paused during system suspend. For example, if a process sleeps for 60 seconds and the system is suspended for 30 seconds in the middle of the sleep, the sleep duration is 90 seconds in real time.

Sleep can be interrupted by a signal: the function fails with EINTR.

+-------------------+--------------+
| Name              | C Resolution |
+===================+==============+
| nanosleep()       | 1 ns         |
+-------------------+--------------+
| clock_nanosleep() | 1 ns         |
+-------------------+--------------+
| usleep()          | 1 µs         |
+-------------------+--------------+
| delay()           | 1 µs         |
+-------------------+--------------+
| sleep()           | 1 sec        |
+-------------------+--------------+

Other functions:

+--------------------------+--------------+
| Name                     | C Resolution |
+==========================+==============+
| sigtimedwait()           | 1 ns         |
+--------------------------+--------------+
| pthread_cond_timedwait() | 1 ns         |
+--------------------------+--------------+
| sem_timedwait()          | 1 ns         |
+--------------------------+--------------+
| select()                 | 1 µs         |
+--------------------------+--------------+
| epoll()                  | 1 ms         |
+--------------------------+--------------+
| poll()                   | 1 ms         |
+--------------------------+--------------+
| WaitForSingleObject()    | 1 ms         |
+--------------------------+--------------+

The "C Resolution" column is the resolution of the underlying C structure.

Functions

- sleep(seconds)
- usleep(microseconds)
- nanosleep(nanoseconds, remaining): Linux manpage of nanosleep()
- delay(milliseconds)

clock_nanosleep

clock_nanosleep(clock_id, flags, nanoseconds, remaining): Linux manpage of clock_nanosleep(). If flags is TIMER_ABSTIME, then the request is interpreted as an absolute time as measured by the clock, clock_id. If the request is less than or equal to the current value of the clock, then clock_nanosleep() returns immediately without suspending the calling thread.

POSIX.1 specifies that changing the value of the CLOCK_REALTIME clock via clock_settime(2) shall have no effect on a thread that is blocked on a relative clock_nanosleep().

select()

select(nfds, readfds, writefds, exceptfds, timeout). Since Linux 2.6.28, select() uses high-resolution timers to handle the timeout. A process has a "slack" attribute to configure the precision of the timeout; the default slack is 50 microseconds. Before Linux 2.6.28, timeouts for select() were handled by the main timing subsystem at a jiffy-level resolution. Read also High- (but not too high-) resolution timeouts and Timer slack.

Other functions

- poll(), epoll()
- sigtimedwait(). POSIX: "If the Monotonic Clock option is supported, the CLOCK_MONOTONIC clock shall be used to measure the time interval specified by the timeout argument."
- pthread_cond_timedwait(), pthread_condattr_setclock(). "The default value of the clock attribute shall refer to the system time."
System Standby

The ACPI power state "S3" is a system standby mode, also called "Suspend to RAM". RAM remains powered.

On Windows, the WM_POWERBROADCAST message is sent to Windows applications to notify them of power-management events (e.g. the power status has changed). For Mac OS X, read Registering and unregistering for sleep and wake notifications (Technical Q&A QA1340).

Footnotes

Links

Related Python issues:

- Issue #12822: NewGIL should use CLOCK_MONOTONIC if possible.
- Issue #14222: Use time.steady() to implement timeout
- Issue #14309: Deprecate time.clock()
- Issue #14397: Use GetTickCount/GetTickCount64 instead of QueryPerformanceCounter for monotonic clock
- Issue #14428: Implementation of PEP 418
- Issue #14555: clock_gettime/settime/getres: Add more clock identifiers

Libraries exposing monotonic clocks:

- Java: System.nanoTime
- Qt library: QElapsedTimer
- glib library: g_get_monotonic_time() uses GetTickCount64()/GetTickCount() on Windows, clock_gettime(CLOCK_MONOTONIC) on UNIX, or falls back to the system clock
- python-monotonic-time (github)
- Monoclock.nano_count() uses clock_gettime(CLOCK_MONOTONIC) and returns a number of nanoseconds
- monotonic_clock by Thomas Habets
- Perl: Time::HiRes exposes clock_gettime(CLOCK_MONOTONIC)
- Ruby: AbsoluteTime.now: uses clock_gettime(CLOCK_MONOTONIC), mach_absolute_time() or gettimeofday(). The "AbsoluteTime.monotonic?" method indicates whether AbsoluteTime.now is monotonic or not.
- libpthread: POSIX thread library for Windows (clock.c)
- Boost.Chrono uses:
  - system_clock:
    - mac = gettimeofday()
    - posix = clock_gettime(CLOCK_REALTIME)
    - win = GetSystemTimeAsFileTime()
  - steady_clock:
    - mac = mach_absolute_time()
    - posix = clock_gettime(CLOCK_MONOTONIC)
    - win = QueryPerformanceCounter()
  - high_resolution_clock:
    - steady_clock if available, system_clock otherwise

Time:

- Twisted issue #2424: Add reactor option to start with monotonic clock
- gettimeofday() should never be used to measure time, by Thomas Habets (2010-09-05)
- hrtimers - subsystem for high-resolution kernel timers
- C++ Timeout Specification, by Lawrence Crowl (2010-08-19)
- Windows: Game Timing and Multicore Processors, by Chuck Walbourn (December 2005)
- Implement a Continuously Updating, High-Resolution Time Provider for Windows, by Johan Nilsson (March 2004)
- clockspeed uses a hardware tick counter to compensate for a persistently fast or slow system time, by D. J. Bernstein (1998)
- Retrieving system time lists hardware clocks and time functions with their resolution and epoch or range
- On Windows, the JavaScript runtime of Firefox interpolates GetSystemTimeAsFileTime() with QueryPerformanceCounter() to get a higher resolution. See Bug 363258 - bad millisecond resolution for (new Date).getTime() / Date.now() on Windows.
- When microseconds matter: How the IBM High Resolution Time Stamp Facility accurately measures itty bits of time, by W. Nathaniel Mills, III (Apr 2002)
- Win32 Performance Measurement Options, by Matthew Wilson (May 2003)
- Counter Availability and Characteristics for Feed-forward Based Synchronization, by Timothy Broomhead, Julien Ridoux, Darryl Veitch (2009)
- System Management Interrupt (SMI) issues:
  - System Management Interrupt Free Hardware, by Keith Mannthey (2009)
  - IBM Real-Time "SMI Free" mode driver, by Keith Mannthey (Feb 2009)
  - Fixing Realtime problems caused by SMI on Ubuntu
  - [RFC] simple SMI detector, by Jon Masters (Jan 2009)
  - [PATCH 2.6.34-rc3] A nonintrusive SMI sniffer for x86, by Joe Korty (2010-04)

Acceptance

The PEP was accepted on 2012-04-28 by Guido van Rossum[6]. The PEP implementation has since been committed to the repository.

References

Copyright

This document has been placed in the public domain.

[1] "_time" is a hypothetical module only used for the example. The time module is implemented in C and so there is no need for such a module.
[2] "_time" is a hypothetical module only used for the example. The time module is implemented in C and so there is no need for such a module.
[3] "_time" is a hypothetical module only used for the example. The time module is implemented in C and so there is no need for such a module.
[4] "_time" is a hypothetical module only used for the example. The time module is implemented in C and so there is no need for such a module.
[5] "_time" is a hypothetical module only used for the example. The time module is implemented in C and so there is no need for such a module.
[6] https://mail.python.org/pipermail/python-dev/2012-April/119094.html
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0418/", "authors": [ "Cameron Simpson" ], "pep_number": "0418", "pandoc_version": "3.5" }
0647
PEP: 647
Title: User-Defined Type Guards
Author: Eric Traut <erictr at microsoft.com>
Sponsor: Guido van Rossum <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Typing
Created: 07-Oct-2020
Python-Version: 3.10
Post-History: 28-Dec-2020, 09-Apr-2021
Resolution: https://mail.python.org/archives/list/[email protected]/thread/2ME6F6YUVKHOQYKSHTVQQU5WD4CVAZU4/

See also: typing:typeguard and typing.TypeGuard

Abstract

This PEP specifies a way for programs to influence conditional type narrowing employed by a type checker based on runtime checks.

Motivation

Static type checkers commonly employ a technique called "type narrowing" to determine a more precise type of an expression within a program's code flow. When type narrowing is applied within a block of code based on a conditional code flow statement (such as if and while statements), the conditional expression is sometimes referred to as a "type guard". Python type checkers typically support various forms of type guard expressions.

    def func(val: Optional[str]):
        # "is None" type guard
        if val is not None:
            # Type of val is narrowed to str
            ...
        else:
            # Type of val is narrowed to None
            ...

    def func(val: Optional[str]):
        # Truthy type guard
        if val:
            # Type of val is narrowed to str
            ...
        else:
            # Type of val remains Optional[str]
            ...

    def func(val: Union[str, float]):
        # "isinstance" type guard
        if isinstance(val, str):
            # Type of val is narrowed to str
            ...
        else:
            # Type of val is narrowed to float
            ...

    def func(val: Literal[1, 2]):
        # Comparison type guard
        if val == 1:
            # Type of val is narrowed to Literal[1]
            ...
        else:
            # Type of val is narrowed to Literal[2]
            ...

There are cases where type narrowing cannot be applied based on static information only. Consider the following example:

    def is_str_list(val: List[object]) -> bool:
        """Determines whether all objects in the list are strings"""
        return all(isinstance(x, str) for x in val)

    def func1(val: List[object]):
        if is_str_list(val):
            print(" ".join(val))  # Error: invalid type

This code is correct, but a type checker will report a type error because the value val passed to the join method is understood to be of type List[object]. The type checker does not have enough information to statically verify that the type of val is List[str] at this point. This PEP introduces a way for a function like is_str_list to be defined as a "user-defined type guard". This allows code to extend the type guards that are supported by type checkers.

Using this new mechanism, the is_str_list function in the above example would be modified slightly. Its return type would be changed from bool to TypeGuard[List[str]]. This promises not merely that the return value is boolean, but that a True return value indicates that the input to the function was of the specified type.

    from typing import TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        """Determines whether all objects in the list are strings"""
        return all(isinstance(x, str) for x in val)

User-defined type guards can also be used to determine whether a dictionary conforms to the type requirements of a TypedDict.

    class Person(TypedDict):
        name: str
        age: int

    def is_person(val: dict) -> "TypeGuard[Person]":
        try:
            return isinstance(val["name"], str) and isinstance(val["age"], int)
        except KeyError:
            return False

    def print_age(val: dict):
        if is_person(val):
            print(f"Age: {val['age']}")
        else:
            print("Not a person!")

Specification

TypeGuard Type

This PEP introduces the symbol TypeGuard exported from the typing module.
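Before the detailed rules that follow, a minimal runnable sketch of the mechanism just described (TypeGuard is importable from typing on Python 3.10+, or from typing_extensions on older versions); note that at runtime a type guard is an ordinary function returning a bool:

    from typing import List, TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        # The annotation only affects static checkers; at runtime
        # this simply returns a bool.
        return all(isinstance(x, str) for x in val)

    def join_or_count(val: List[object]) -> str:
        if is_str_list(val):
            # A checker narrows val to List[str] here, so join() is accepted.
            return " ".join(val)
        return f"{len(val)} non-string items"

    print(join_or_count(["a", "b", "c"]))  # a b c
    print(join_or_count([1, "b"]))         # 2 non-string items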
TypeGuard is a special form that accepts a single type argument. It is used to annotate the return type of a user-defined type guard function. Return statements within a type guard function should return bool values, and type checkers should verify that all return paths return a bool. In all other respects, TypeGuard is a distinct type from bool. It is not a subtype of bool. Therefore, Callable[..., TypeGuard[int]] is not assignable to Callable[..., bool]. When TypeGuard is used to annotate the return type of a function or method that accepts at least one parameter, that function or method is treated by type checkers as a user-defined type guard. The type argument provided for TypeGuard indicates the type that has been validated by the function. User-defined type guards can be generic functions, as shown in this example: _T = TypeVar("_T") def is_two_element_tuple(val: Tuple[_T, ...]) -> TypeGuard[Tuple[_T, _T]]: return len(val) == 2 def func(names: Tuple[str, ...]): if is_two_element_tuple(names): reveal_type(names) # Tuple[str, str] else: reveal_type(names) # Tuple[str, ...] Type checkers should assume that type narrowing should be applied to the expression that is passed as the first positional argument to a user-defined type guard. If the type guard function accepts more than one argument, no type narrowing is applied to those additional argument expressions. If a type guard function is implemented as an instance method or class method, the first positional argument maps to the second parameter (after "self" or "cls"). Here are some examples of user-defined type guard functions that accept more than one argument: def is_str_list(val: List[object], allow_empty: bool) -> TypeGuard[List[str]]: if len(val) == 0: return allow_empty return all(isinstance(x, str) for x in val) _T = TypeVar("_T") def is_set_of(val: Set[Any], type: Type[_T]) -> TypeGuard[Set[_T]]: return all(isinstance(x, type) for x in val) The return type of a user-defined type guard function will normally refer to a type that is strictly "narrower" than the type of the first argument (that is, it's a more specific type that can be assigned to the more general type). However, it is not required that the return type be strictly narrower. This allows for cases like the example above where List[str] is not assignable to List[object]. When a conditional statement includes a call to a user-defined type guard function, and that function returns true, the expression passed as the first positional argument to the type guard function should be assumed by a static type checker to take on the type specified in the TypeGuard return type, unless and until it is further narrowed within the conditional code block. Some built-in type guards provide narrowing for both positive and negative tests (in both the if and else clauses). For example, consider the type guard for an expression of the form x is None. If x has a type that is a union of None and some other type, it will be narrowed to None in the positive case and the other type in the negative case. User-defined type guards apply narrowing only in the positive case (the if clause). The type is not narrowed in the negative case. OneOrTwoStrs = Union[Tuple[str], Tuple[str, str]] def func(val: OneOrTwoStrs): if is_two_element_tuple(val): reveal_type(val) # Tuple[str, str] ... else: reveal_type(val) # OneOrTwoStrs ... if not is_two_element_tuple(val): reveal_type(val) # OneOrTwoStrs ... else: reveal_type(val) # Tuple[str, str] ... 
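A small sketch of the instance-method rule above: when the guard is a method, the first positional argument at the call site (the second parameter, after self) is the one narrowed, and additional arguments are never narrowed:

    from typing import Any, Set, Type, TypeGuard, TypeVar

    _T = TypeVar("_T")

    class Validator:
        def is_set_of(self, val: Set[Any], tp: Type[_T]) -> TypeGuard[Set[_T]]:
            # "val", not "self", is the expression that gets narrowed;
            # "tp" is an additional argument and is left untouched.
            return all(isinstance(x, tp) for x in val)

    items: Set[Any] = {1, 2, 3}
    if Validator().is_set_of(items, int):
        print(sum(items))  # a checker treats items as Set[int] here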
Backwards Compatibility

Existing code that does not use this new functionality will be unaffected. Notably, code which uses annotations in a manner incompatible with the stdlib typing library should simply not import TypeGuard.

Reference Implementation

The Pyright type checker supports the behavior described in this PEP.

Rejected Ideas

Decorator Syntax

The use of a decorator was considered for defining type guards.

    @type_guard(List[str])
    def is_str_list(val: List[object]) -> bool: ...

The decorator approach is inferior because it requires runtime evaluation of the type, precluding forward references. The proposed approach was also deemed to be easier to understand and simpler to implement.

Enforcing Strict Narrowing

Strict type narrowing enforcement (requiring that the type specified in the TypeGuard type argument is a narrower form of the type specified for the first parameter) was considered, but this eliminates valuable use cases for this functionality. For instance, the is_str_list example above would be considered invalid because List[str] is not a subtype of List[object] because of invariance rules.

One variation that was considered was to require a strict narrowing requirement by default but allow the type guard function to specify some flag to indicate that it is not following this requirement. This was rejected because it was deemed cumbersome and unnecessary.

Another consideration was to define some less-strict check that ensures that there is some overlap between the value type and the narrowed type specified in the TypeGuard. The problem with this proposal is that the rules for type compatibility are already very complex when considering unions, protocols, type variables, generics, etc. Defining a variant of these rules that relaxes some of these constraints just for the purpose of this feature would require that we articulate all of the subtle ways in which the rules differ and under what specific circumstances the constraints are relaxed. For this reason, it was decided to omit all checks.

It was noted that without enforcing strict narrowing, it would be possible to break type safety. A poorly-written type guard function could produce unsafe or even nonsensical results. For example:

    def f(value: int) -> TypeGuard[str]:
        return True

However, there are many ways a determined or uninformed developer can subvert type safety -- most commonly by using cast or Any. If a Python developer takes the time to learn about and implement user-defined type guards within their code, it is safe to assume that they are interested in type safety and will not write their type guard functions in a way that will undermine type safety or produce nonsensical results.

Conditionally Applying TypeGuard Type

It was suggested that the expression passed as the first argument to a type guard function should retain its existing type if the type of the expression was a proper subtype of the type specified in the TypeGuard return type. For example, if the type guard function is def f(value: object) -> TypeGuard[float] and the expression passed to this function is of type int, it would retain the int type rather than take on the float type indicated by the TypeGuard return type. This proposal was rejected because it added complexity, inconsistency, and opened up additional questions about the proper behavior if the type of the expression was of composite types like unions or type variables with multiple constraints.
It was decided that the added complexity and inconsistency was not justified given that it would provide little or no added value. Narrowing of Arbitrary Parameters TypeScript's formulation of user-defined type guards allows for any input parameter to be used as the value tested for narrowing. The TypeScript language authors could not recall any real-world examples in TypeScript where the parameter being tested was not the first parameter. For this reason, it was decided unnecessary to burden the Python implementation of user-defined type guards with additional complexity to support a contrived use case. If such use cases are identified in the future, there are ways the TypeGuard mechanism could be extended. This could involve the use of keyword indexing, as proposed in PEP 637. Narrowing of Implicit "self" and "cls" Parameters The proposal states that the first positional argument is assumed to be the value that is tested for narrowing. If the type guard function is implemented as an instance or class method, an implicit self or cls argument will also be passed to the function. A concern was raised that there may be cases where it is desired to apply the narrowing logic on self and cls. This is an unusual use case, and accommodating it would significantly complicate the implementation of user-defined type guards. It was therefore decided that no special provision would be made for it. If narrowing of self or cls is required, the value can be passed as an explicit argument to a type guard function. Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0647/", "authors": [ "Eric Traut" ], "pep_number": "0647", "pandoc_version": "3.5" }
0334
PEP: 334
Title: Simple Coroutines via SuspendIteration
Author: Clark C. Evans <[email protected]>
Status: Withdrawn
Type: Standards Track
Content-Type: text/x-rst
Created: 26-Aug-2004
Python-Version: 3.0
Post-History:

Abstract

Asynchronous application frameworks such as Twisted[1] and Peak[2] are based on cooperative multitasking via event queues or deferred execution. While this approach to application development does not involve threads and thus avoids a whole class of problems[3], it creates a different sort of programming challenge. When an I/O operation would block, a user request must suspend so that other requests can proceed. The concept of a coroutine[4] promises to help the application developer grapple with this state management difficulty.

This PEP proposes a limited approach to coroutines based on an extension to the iterator protocol (PEP 234). Currently, an iterator may raise a StopIteration exception to indicate that it is done producing values. This proposal adds another exception to this protocol, SuspendIteration, which indicates that the given iterator may have more values to produce, but is unable to do so at this time.

Rationale

There are two current approaches to bringing coroutines to Python. Christian Tismer's Stackless[5] involves a ground-up restructuring of Python's execution model by hacking the 'C' stack. While this approach works, its operation is hard to describe and keep portable. A related approach is to compile Python code to Parrot[6], a register-based virtual machine, which has coroutines. Unfortunately, neither of these solutions is portable to IronPython (CLR) or Jython (JavaVM).

It is thought that a more limited approach, based on iterators, could provide a coroutine facility to application programmers and still be portable across runtimes.

- Iterators keep their state in local variables that are not on the "C" stack. Iterators can be viewed as classes, with state stored in member variables that are persistent across calls to its next() method.
- While an uncaught exception may terminate a function's execution, an uncaught exception need not invalidate an iterator. The proposed exception, SuspendIteration, uses this feature. In other words, just because one call to next() results in an exception does not necessarily imply that the iterator itself is no longer capable of producing values.

There are four places where this new exception impacts:

- The PEP 255 simple generator mechanism could be extended to safely 'catch' this SuspendIteration exception, stuff away its current state, and pass the exception on to the caller.
- Various iterator filters[7] in the standard library, such as itertools.izip, should be made aware of this exception so that they can transparently propagate SuspendIteration.
- Iterators generated from I/O operations, such as a file or socket reader, could be modified to have a non-blocking variety. This option would raise a subclass of SuspendIteration if the requested operation would block.
- The asyncore library could be updated to provide a basic 'runner' that pulls from an iterator; if the SuspendIteration exception is caught, then it moves on to the next iterator in its runlist[8]. External frameworks like Twisted would provide alternative implementations, perhaps based on FreeBSD's kqueue or Linux's epoll.

While these may seem dramatic changes, it is a very small amount of work compared with the utility provided by continuations.
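The claim above that an uncaught exception need not invalidate an iterator is easy to verify in any modern Python; a small sketch using Python 3 spelling (__next__ rather than next):

    class Flaky:
        """A class-based iterator that fails once, then recovers."""
        def __init__(self):
            self.calls = 0
        def __iter__(self):
            return self
        def __next__(self):
            self.calls += 1
            if self.calls == 2:
                raise RuntimeError("transient failure")
            if self.calls > 3:
                raise StopIteration
            return self.calls

    it = Flaky()
    print(next(it))        # 1
    try:
        next(it)           # raises RuntimeError...
    except RuntimeError:
        pass
    print(next(it))        # 3 -- ...but iteration can continue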
Semantics

This section will explain, at a high level, how the introduction of this new SuspendIteration exception would behave.

Simple Iterators

The current functionality of iterators is best seen with a simple example which produces two values 'one' and 'two':

    class States:
        def __iter__(self):
            self._next = self.state_one
            return self
        def next(self):
            return self._next()
        def state_one(self):
            self._next = self.state_two
            return "one"
        def state_two(self):
            self._next = self.state_stop
            return "two"
        def state_stop(self):
            raise StopIteration

    print list(States())

An equivalent iteration could, of course, be created by the following generator:

    def States():
        yield 'one'
        yield 'two'

    print list(States())

Introducing SuspendIteration

Suppose that between producing 'one' and 'two', the generator above could block on a socket read. In this case, we would want to raise SuspendIteration to signal that the iterator is not done producing, but is unable to provide a value at the current moment:

    from random import randint
    from time import sleep

    class SuspendIteration(Exception):
        pass

    class NonBlockingResource:
        """Randomly unable to produce the second value"""
        def __iter__(self):
            self._next = self.state_one
            return self
        def next(self):
            return self._next()
        def state_one(self):
            self._next = self.state_suspend
            return "one"
        def state_suspend(self):
            rand = randint(1,10)
            if 2 == rand:
                self._next = self.state_two
                return self.state_two()
            raise SuspendIteration()
        def state_two(self):
            self._next = self.state_stop
            return "two"
        def state_stop(self):
            raise StopIteration

    def sleeplist(iterator, timeout = .1):
        """
        Do other things (e.g. sleep) while resource is
        unable to provide the next value
        """
        it = iter(iterator)
        retval = []
        while True:
            try:
                retval.append(it.next())
            except SuspendIteration:
                sleep(timeout)
                continue
            except StopIteration:
                break
        return retval

    print sleeplist(NonBlockingResource())

In a real-world situation, the NonBlockingResource would be a file iterator, socket handle, or other I/O based producer. The sleeplist would instead be an async reactor, such as those found in asyncore or Twisted. The non-blocking resource could, of course, be written as a generator:

    def NonBlockingResource():
        yield "one"
        while True:
            rand = randint(1,10)
            if 2 == rand:
                break
            raise SuspendIteration()
        yield "two"

It is not necessary to add a keyword, 'suspend', since most real content generators will not be in application code, they will be in low-level I/O based operations. Since most programmers need not be exposed to the SuspendIteration() mechanism, a keyword is not needed.

Application Iterators

The previous example is rather contrived; a more 'real-world' example would be a web page generator which yields HTML content, and pulls from a database. Note that this is an example of neither the 'producer' nor the 'consumer', but rather of a filter:

    def ListAlbums(cursor):
        cursor.execute("SELECT title, artist FROM album")
        yield '<html><body><table><tr><td>Title</td><td>Artist</td></tr>'
        for (title, artist) in cursor:
            yield '<tr><td>%s</td><td>%s</td></tr>' % (title, artist)
        yield '</table></body></html>'

The problem, of course, is that the database may block for some time before any rows are returned, and that during execution, rows may be returned in blocks of 10 or 100 at a time. Ideally, if the database blocks for the next set of rows, another user connection could be serviced. Note the complete absence of SuspendIteration in the above code.
If done correctly, application developers would be able to focus on functionality rather than concurrency issues. The iterator created by the above generator should do the magic necessary to maintain state, yet pass the exception through to a lower-level async framework. Here is an example of what the corresponding iterator would look like if coded up as a class:

    class ListAlbums:
        def __init__(self, cursor):
            self.cursor = cursor
        def __iter__(self):
            self.cursor.execute("SELECT title, artist FROM album")
            self._iter = iter(self.cursor)
            self._next = self.state_head
            return self
        def next(self):
            return self._next()
        def state_head(self):
            self._next = self.state_cursor
            return "<html><body><table><tr><td>\
    Title</td><td>Artist</td></tr>"
        def state_tail(self):
            self._next = self.state_stop
            return "</table></body></html>"
        def state_cursor(self):
            try:
                (title,artist) = self._iter.next()
                return '<tr><td>%s</td><td>%s</td></tr>' % (title, artist)
            except StopIteration:
                self._next = self.state_tail
                return self.next()
            except SuspendIteration:
                # just pass-through
                raise
        def state_stop(self):
            raise StopIteration

Complicating Factors

While the above example is straightforward, things are a bit more complicated if the intermediate generator 'condenses' values, that is, it pulls in two or more values for each value it produces. For example:

    def pair(iterLeft,iterRight):
        rhs = iter(iterRight)
        lhs = iter(iterLeft)
        while True:
            yield (rhs.next(), lhs.next())

In this case, the corresponding iterator behavior has to be a bit more subtle to handle the case of either the right or left iterator raising SuspendIteration. It seems to be a matter of decomposing the generator to recognize intermediate states where a SuspendIteration exception from the producing context could happen:

    class pair:
        def __init__(self, iterLeft, iterRight):
            self.iterLeft = iterLeft
            self.iterRight = iterRight
        def __iter__(self):
            self.rhs = iter(self.iterRight)
            self.lhs = iter(self.iterLeft)
            self._temp_rhs = None
            self._temp_lhs = None
            self._next = self.state_rhs
            return self
        def next(self):
            return self._next()
        def state_rhs(self):
            self._temp_rhs = self.rhs.next()
            self._next = self.state_lhs
            return self.next()
        def state_lhs(self):
            self._temp_lhs = self.lhs.next()
            self._next = self.state_pair
            return self.next()
        def state_pair(self):
            self._next = self.state_rhs
            return (self._temp_rhs, self._temp_lhs)

This proposal assumes that a corresponding iterator written using this class-based method is possible for existing generators. The challenge seems to be the identification of distinct states within the generator where suspension could occur.

Resource Cleanup

The current generator mechanism has a strange interaction with exceptions where a 'yield' statement is not allowed within a try/finally block. The SuspendIteration exception provides another similar issue. The impacts of this issue are not clear. However, it may be that re-writing the generator into a state machine, as the previous section did, could resolve this issue, making the situation no worse than, and perhaps even removing, the yield/finally situation. More investigation is needed in this area.

API and Limitations

This proposal only covers 'suspending' a chain of iterators, and does not cover (of course) suspending general functions, methods, or "C" extension functions. While there could be no direct support for creating generators in "C" code, native "C" iterators which comply with the SuspendIteration semantics are certainly possible.
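For readers trying this today, here is a self-contained Python 3 port of the NonBlockingResource example above; since the proposal was withdrawn, SuspendIteration is defined by hand rather than imported:

    import random
    import time

    class SuspendIteration(Exception):
        """Hypothetical exception proposed by this PEP (never added)."""

    class NonBlockingResource:
        """Class-based iterator: a raised SuspendIteration does not
        invalidate the iterator, so the caller can simply retry."""
        def __init__(self):
            self._state = self._state_one
        def __iter__(self):
            return self
        def __next__(self):
            return self._state()
        def _state_one(self):
            self._state = self._state_suspend
            return "one"
        def _state_suspend(self):
            if random.randint(1, 10) == 2:
                self._state = self._state_stop
                return "two"
            raise SuspendIteration()
        def _state_stop(self):
            raise StopIteration

    def sleeplist(iterable, timeout=0.1):
        it = iter(iterable)
        result = []
        while True:
            try:
                result.append(next(it))
            except SuspendIteration:
                time.sleep(timeout)   # do other work, then retry
            except StopIteration:
                break
        return result

    print(sleeplist(NonBlockingResource()))   # ['one', 'two']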
Low-Level Implementation

The author of the PEP is not yet familiar enough with the Python execution model to comment in this area.

References

Copyright

This document has been placed in the public domain.

[1] Twisted (http://twistedmatrix.com)
[2] Peak (http://peak.telecommunity.com)
[3] C10K (http://www.kegel.com/c10k.html)
[4] Coroutines (http://c2.com/cgi/wiki?CallWithCurrentContinuation)
[5] Stackless Python (http://stackless.com)
[6] Parrot /w coroutines (http://www.sidhe.org/~dan/blog/archives/000178.html)
[7] itertools - Functions creating iterators (http://docs.python.org/library/itertools.html)
[8] Microthreads in Python, David Mertz (http://www-106.ibm.com/developerworks/linux/library/l-pythrd.html)
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0334/", "authors": [ "Clark C. Evans" ], "pep_number": "0334", "pandoc_version": "3.5" }
3333
PEP: 3333 Title: Python Web Server Gateway Interface v1.0.1 Author: Phillip J. Eby <[email protected]> Discussions-To: [email protected] Status: Final Type: Informational Content-Type: text/x-rst Created: 26-Sep-2010 Post-History: 26-Sep-2010, 04-Oct-2010 Replaces: 333 Preface for Readers of PEP 333 This is an updated version of PEP 333, modified slightly to improve usability under Python 3, and to incorporate several long-standing de facto amendments to the WSGI protocol. (Its code samples have also been ported to Python 3.) While for procedural reasons[1], this must be a distinct PEP, no changes were made that invalidate previously-compliant servers or applications under Python 2.x. If your 2.x application or server is compliant to PEP 333, it is also compliant with this PEP. Under Python 3, however, your app or server must also follow the rules outlined in the sections below titled, A Note On String Types, and Unicode Issues. For detailed, line-by-line diffs between this document and PEP 333, you may view its SVN revision history[2], from revision 84854 forward. Abstract This document specifies a proposed standard interface between web servers and Python web applications or frameworks, to promote web application portability across a variety of web servers. Original Rationale and Goals (from PEP 333) Python currently boasts a wide variety of web application frameworks, such as Zope, Quixote, Webware, SkunkWeb, PSO, and Twisted Web -- to name just a few[3]. This wide variety of choices can be a problem for new Python users, because generally speaking, their choice of web framework will limit their choice of usable web servers, and vice versa. By contrast, although Java has just as many web application frameworks available, Java's "servlet" API makes it possible for applications written with any Java web application framework to run in any web server that supports the servlet API. The availability and widespread use of such an API in web servers for Python -- whether those servers are written in Python (e.g. Medusa), embed Python (e.g. mod_python), or invoke Python via a gateway protocol (e.g. CGI, FastCGI, etc.) -- would separate choice of framework from choice of web server, freeing users to choose a pairing that suits them, while freeing framework and server developers to focus on their preferred area of specialization. This PEP, therefore, proposes a simple and universal interface between web servers and web applications or frameworks: the Python Web Server Gateway Interface (WSGI). But the mere existence of a WSGI spec does nothing to address the existing state of servers and frameworks for Python web applications. Server and framework authors and maintainers must actually implement WSGI for there to be any effect. However, since no existing servers or frameworks support WSGI, there is little immediate reward for an author who implements WSGI support. Thus, WSGI must be easy to implement, so that an author's initial investment in the interface can be reasonably low. Thus, simplicity of implementation on both the server and framework sides of the interface is absolutely critical to the utility of the WSGI interface, and is therefore the principal criterion for any design decisions. Note, however, that simplicity of implementation for a framework author is not the same thing as ease of use for a web application author. 
WSGI presents an absolutely "no frills" interface to the framework author, because bells and whistles like response objects and cookie handling would just get in the way of existing frameworks' handling of these issues. Again, the goal of WSGI is to facilitate easy interconnection of existing servers and applications or frameworks, not to create a new web framework. Note also that this goal precludes WSGI from requiring anything that is not already available in deployed versions of Python. Therefore, new standard library modules are not proposed or required by this specification, and nothing in WSGI requires a Python version greater than 2.2.2. (It would be a good idea, however, for future versions of Python to include support for this interface in web servers provided by the standard library.) In addition to ease of implementation for existing and future frameworks and servers, it should also be easy to create request preprocessors, response postprocessors, and other WSGI-based "middleware" components that look like an application to their containing server, while acting as a server for their contained applications. If middleware can be both simple and robust, and WSGI is widely available in servers and frameworks, it allows for the possibility of an entirely new kind of Python web application framework: one consisting of loosely-coupled WSGI middleware components. Indeed, existing framework authors may even choose to refactor their frameworks' existing services to be provided in this way, becoming more like libraries used with WSGI, and less like monolithic frameworks. This would then allow application developers to choose "best-of-breed" components for specific functionality, rather than having to commit to all the pros and cons of a single framework. Of course, as of this writing, that day is doubtless quite far off. In the meantime, it is a sufficient short-term goal for WSGI to enable the use of any framework with any server. Finally, it should be mentioned that the current version of WSGI does not prescribe any particular mechanism for "deploying" an application for use with a web server or server gateway. At the present time, this is necessarily implementation-defined by the server or gateway. After a sufficient number of servers and frameworks have implemented WSGI to provide field experience with varying deployment requirements, it may make sense to create another PEP, describing a deployment standard for WSGI servers and application frameworks. Specification Overview The WSGI interface has two sides: the "server" or "gateway" side, and the "application" or "framework" side. The server side invokes a callable object that is provided by the application side. The specifics of how that object is provided are up to the server or gateway. It is assumed that some servers or gateways will require an application's deployer to write a short script to create an instance of the server or gateway, and supply it with the application object. Other servers and gateways may use configuration files or other mechanisms to specify where an application object should be imported from, or otherwise obtained. In addition to "pure" servers/gateways and applications/frameworks, it is also possible to create "middleware" components that implement both sides of this specification. Such components act as an application to their containing server, and as a server to a contained application, and can be used to provide extended APIs, content transformation, navigation, and other useful functions. 
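As a concrete (if anachronistic) illustration of the two sides, the sketch below uses wsgiref.simple_server, the reference server that later shipped in the standard library; the application side is a plain callable, and the server side invokes it once per request:

    from wsgiref.simple_server import make_server

    # Application/framework side: a callable of two arguments.
    def hello_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"Hello world!\n"]

    # Server/gateway side: wsgiref invokes the callable per request.
    # (Python 3.6+ for the with-statement form.)
    with make_server("127.0.0.1", 8000, hello_app) as httpd:
        httpd.handle_request()   # serve a single request, then exit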
Throughout this specification, we will use the term "a callable" to mean "a function, method, class, or an instance with a __call__ method". It is up to the server, gateway, or application implementing the callable to choose the appropriate implementation technique for their needs. Conversely, a server, gateway, or application that is invoking a callable must not have any dependency on what kind of callable was provided to it. Callables are only to be called, not introspected upon. A Note On String Types In general, HTTP deals with bytes, which means that this specification is mostly about handling bytes. However, the content of those bytes often has some kind of textual interpretation, and in Python, strings are the most convenient way to handle text. But in many Python versions and implementations, strings are Unicode, rather than bytes. This requires a careful balance between a usable API and correct translations between bytes and text in the context of HTTP... especially to support porting code between Python implementations with different str types. WSGI therefore defines two kinds of "string": - "Native" strings (which are always implemented using the type named str) that are used for request/response headers and metadata - "Bytestrings" (which are implemented using the bytes type in Python 3, and str elsewhere), that are used for the bodies of requests and responses (e.g. POST/PUT input data and HTML page outputs). Do not be confused however: even if Python's str type is actually Unicode "under the hood", the content of native strings must still be translatable to bytes via the Latin-1 encoding! (See the section on Unicode Issues later in this document for more details.) In short: where you see the word "string" in this document, it refers to a "native" string, i.e., an object of type str, whether it is internally implemented as bytes or unicode. Where you see references to "bytestring", this should be read as "an object of type bytes under Python 3, or type str under Python 2". And so, even though HTTP is in some sense "really just bytes", there are many API conveniences to be had by using whatever Python's default str type is. The Application/Framework Side The application object is simply a callable object that accepts two arguments. The term "object" should not be misconstrued as requiring an actual object instance: a function, method, class, or instance with a __call__ method are all acceptable for use as an application object. Application objects must be able to be invoked more than once, as virtually all servers/gateways (other than CGI) will make such repeated requests. (Note: although we refer to it as an "application" object, this should not be construed to mean that application developers will use WSGI as a web programming API! It is assumed that application developers will continue to use existing, high-level framework services to develop their applications. WSGI is a tool for framework and server developers, and is not intended to directly support application developers.) 
Here are two example application objects; one is a function, and the other is a class: HELLO_WORLD = b"Hello world!\n" def simple_app(environ, start_response): """Simplest possible application object""" status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return [HELLO_WORLD] class AppClass: """Produce the same output, but using a class (Note: 'AppClass' is the "application" here, so calling it returns an instance of 'AppClass', which is then the iterable return value of the "application callable" as required by the spec. If we wanted to use *instances* of 'AppClass' as application objects instead, we would have to implement a '__call__' method, which would be invoked to execute the application, and we would need to create an instance for use by the server or gateway. """ def __init__(self, environ, start_response): self.environ = environ self.start = start_response def __iter__(self): status = '200 OK' response_headers = [('Content-type', 'text/plain')] self.start(status, response_headers) yield HELLO_WORLD The Server/Gateway Side The server or gateway invokes the application callable once for each request it receives from an HTTP client, that is directed at the application. To illustrate, here is a simple CGI gateway, implemented as a function taking an application object. Note that this simple example has limited error handling, because by default an uncaught exception will be dumped to sys.stderr and logged by the web server. import os, sys enc, esc = sys.getfilesystemencoding(), 'surrogateescape' def unicode_to_wsgi(u): # Convert an environment variable to a WSGI "bytes-as-unicode" string return u.encode(enc, esc).decode('iso-8859-1') def wsgi_to_bytes(s): return s.encode('iso-8859-1') def run_with_cgi(application): environ = {k: unicode_to_wsgi(v) for k,v in os.environ.items()} environ['wsgi.input'] = sys.stdin.buffer environ['wsgi.errors'] = sys.stderr environ['wsgi.version'] = (1, 0) environ['wsgi.multithread'] = False environ['wsgi.multiprocess'] = True environ['wsgi.run_once'] = True if environ.get('HTTPS', 'off') in ('on', '1'): environ['wsgi.url_scheme'] = 'https' else: environ['wsgi.url_scheme'] = 'http' headers_set = [] headers_sent = [] def write(data): out = sys.stdout.buffer if not headers_set: raise AssertionError("write() before start_response()") elif not headers_sent: # Before the first output, send the stored headers status, response_headers = headers_sent[:] = headers_set out.write(wsgi_to_bytes('Status: %s\r\n' % status)) for header in response_headers: out.write(wsgi_to_bytes('%s: %s\r\n' % header)) out.write(wsgi_to_bytes('\r\n')) out.write(data) out.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise original exception if headers sent raise exc_info[1].with_traceback(exc_info[2]) finally: exc_info = None # avoid dangling circular ref elif headers_set: raise AssertionError("Headers already set!") headers_set[:] = [status, response_headers] # Note: error checking on the headers should happen here, # *after* the headers are set. That way, if an error # occurs, start_response can only be re-called with # exc_info set. 
return write result = application(environ, start_response) try: for data in result: if data: # don't send headers until body appears write(data) if not headers_sent: write(b'') # send headers now if body was empty finally: if hasattr(result, 'close'): result.close() Middleware: Components that Play Both Sides Note that a single object may play the role of a server with respect to some application(s), while also acting as an application with respect to some server(s). Such "middleware" components can perform such functions as: - Routing a request to different application objects based on the target URL, after rewriting the environ accordingly. - Allowing multiple applications or frameworks to run side by side in the same process - Load balancing and remote processing, by forwarding requests and responses over a network - Perform content postprocessing, such as applying XSL stylesheets The presence of middleware in general is transparent to both the "server/gateway" and the "application/framework" sides of the interface, and should require no special support. A user who desires to incorporate middleware into an application simply provides the middleware component to the server, as if it were an application, and configures the middleware component to invoke the application, as if the middleware component were a server. Of course, the "application" that the middleware wraps may in fact be another middleware component wrapping another application, and so on, creating what is referred to as a "middleware stack". For the most part, middleware must conform to the restrictions and requirements of both the server and application sides of WSGI. In some cases, however, requirements for middleware are more stringent than for a "pure" server or application, and these points will be noted in the specification. Here is a (tongue-in-cheek) example of a middleware component that converts text/plain responses to pig Latin, using Joe Strout's piglatin.py. (Note: a "real" middleware component would probably use a more robust way of checking the content type, and should also check for a content encoding. Also, this simple example ignores the possibility that a word might be split across a block boundary.) from piglatin import piglatin class LatinIter: """Transform iterated output to piglatin, if it's okay to do so Note that the "okayness" can change until the application yields its first non-empty bytestring, so 'transform_ok' has to be a mutable truth value. 
""" def __init__(self, result, transform_ok): if hasattr(result, 'close'): self.close = result.close self._next = iter(result).__next__ self.transform_ok = transform_ok def __iter__(self): return self def __next__(self): data = self._next() if self.transform_ok: return piglatin(data) # call must be byte-safe on Py3 else: return data class Latinator: # by default, don't transform output transform = False def __init__(self, application): self.application = application def __call__(self, environ, start_response): transform_ok = [] def start_latin(status, response_headers, exc_info=None): # Reset ok flag, in case this is a repeat call del transform_ok[:] for name, value in response_headers: if name.lower() == 'content-type' and value == 'text/plain': transform_ok.append(True) # Strip content-length if present, else it'll be wrong response_headers = [(name, value) for name, value in response_headers if name.lower() != 'content-length' ] break write = start_response(status, response_headers, exc_info) if transform_ok: def write_latin(data): write(piglatin(data)) # call must be byte-safe on Py3 return write_latin else: return write return LatinIter(self.application(environ, start_latin), transform_ok) # Run foo_app under a Latinator's control, using the example CGI gateway from foo_app import foo_app run_with_cgi(Latinator(foo_app)) Specification Details The application object must accept two positional arguments. For the sake of illustration, we have named them environ and start_response, but they are not required to have these names. A server or gateway must invoke the application object using positional (not keyword) arguments. (E.g. by calling result = application(environ, start_response) as shown above.) The environ parameter is a dictionary object, containing CGI-style environment variables. This object must be a builtin Python dictionary (not a subclass, UserDict or other dictionary emulation), and the application is allowed to modify the dictionary in any way it desires. The dictionary must also include certain WSGI-required variables (described in a later section), and may also include server-specific extension variables, named according to a convention that will be described below. The start_response parameter is a callable accepting two required positional arguments, and one optional argument. For the sake of illustration, we have named these arguments status, response_headers, and exc_info, but they are not required to have these names, and the application must invoke the start_response callable using positional arguments (e.g. start_response(status, response_headers)). The status parameter is a status string of the form "999 Message here", and response_headers is a list of (header_name, header_value) tuples describing the HTTP response header. The optional exc_info parameter is described below in the sections on The start_response() Callable and Error Handling. It is used only when the application has trapped an error and is attempting to display an error message to the browser. The start_response callable must return a write(body_data) callable that takes one positional parameter: a bytestring to be written as part of the HTTP response body. (Note: the write() callable is provided only to support certain existing frameworks' imperative output APIs; it should not be used by new applications or frameworks if it can be avoided. See the Buffering and Streaming section for more details.) 
When called by the server, the application object must return an iterable yielding zero or more bytestrings. This can be accomplished in a variety of ways, such as by returning a list of bytestrings, or by the application being a generator function that yields bytestrings, or by the application being a class whose instances are iterable. Regardless of how it is accomplished, the application object must always return an iterable yielding zero or more bytestrings. The server or gateway must transmit the yielded bytestrings to the client in an unbuffered fashion, completing the transmission of each bytestring before requesting another one. (In other words, applications should perform their own buffering. See the Buffering and Streaming section below for more on how application output must be handled.) The server or gateway should treat the yielded bytestrings as binary byte sequences: in particular, it should ensure that line endings are not altered. The application is responsible for ensuring that the bytestring(s) to be written are in a format suitable for the client. (The server or gateway may apply HTTP transfer encodings, or perform other transformations for the purpose of implementing HTTP features such as byte-range transmission. See Other HTTP Features, below, for more details.) If a call to len(iterable) succeeds, the server must be able to rely on the result being accurate. That is, if the iterable returned by the application provides a working __len__() method, it must return an accurate result. (See the Handling the Content-Length Header section for information on how this would normally be used.) If the iterable returned by the application has a close() method, the server or gateway must call that method upon completion of the current request, whether the request was completed normally, or terminated early due to an application error during iteration or an early disconnect of the browser. (The close() method requirement is to support resource release by the application. This protocol is intended to complement PEP 342's generator support, and other common iterables with close() methods.) Applications returning a generator or other custom iterator should not assume the entire iterator will be consumed, as it may be closed early by the server. (Note: the application must invoke the start_response() callable before the iterable yields its first body bytestring, so that the server can send the headers before any body content. However, this invocation may be performed by the iterable's first iteration, so servers must not assume that start_response() has been called before they begin iterating over the iterable.) Finally, servers and gateways must not directly use any other attributes of the iterable returned by the application, unless it is an instance of a type specific to that server or gateway, such as a "file wrapper" returned by wsgi.file_wrapper (see Optional Platform-Specific File Handling). In the general case, only attributes specified here, or accessed via e.g. the PEP 234 iteration APIs are acceptable. environ Variables The environ dictionary is required to contain these CGI environment variables, as defined by the Common Gateway Interface specification[4]. The following variables must be present, unless their value would be an empty string, in which case they may be omitted, except as otherwise noted below. REQUEST_METHOD The HTTP request method, such as "GET" or "POST". This cannot ever be an empty string, and so is always required. 
SCRIPT_NAME The initial portion of the request URL's "path" that corresponds to the application object, so that the application knows its virtual "location". This may be an empty string, if the application corresponds to the "root" of the server.

PATH_INFO The remainder of the request URL's "path", designating the virtual "location" of the request's target within the application. This may be an empty string, if the request URL targets the application root and does not have a trailing slash.

QUERY_STRING The portion of the request URL that follows the "?", if any. May be empty or absent.

CONTENT_TYPE The contents of any Content-Type fields in the HTTP request. May be empty or absent.

CONTENT_LENGTH The contents of any Content-Length fields in the HTTP request. May be empty or absent.

SERVER_NAME, SERVER_PORT When HTTP_HOST is not set, these variables can be combined to determine a default. See the URL Reconstruction section below for more detail. SERVER_NAME and SERVER_PORT are required strings and must never be empty.

SERVER_PROTOCOL The version of the protocol the client used to send the request. Typically this will be something like "HTTP/1.0" or "HTTP/1.1" and may be used by the application to determine how to treat any HTTP request headers. (This variable should probably be called REQUEST_PROTOCOL, since it denotes the protocol used in the request, and is not necessarily the protocol that will be used in the server's response. However, for compatibility with CGI we have to keep the existing name.)

HTTP_ Variables Variables corresponding to the client-supplied HTTP request headers (i.e., variables whose names begin with "HTTP_"). The presence or absence of these variables should correspond with the presence or absence of the appropriate HTTP header in the request.

A server or gateway should attempt to provide as many other CGI variables as are applicable. In addition, if SSL is in use, the server or gateway should also provide as many of the Apache SSL environment variables[5] as are applicable, such as HTTPS=on and SSL_PROTOCOL. Note, however, that an application that uses any CGI variables other than the ones listed above is necessarily non-portable to web servers that do not support the relevant extensions. (For example, web servers that do not publish files will not be able to provide a meaningful DOCUMENT_ROOT or PATH_TRANSLATED.)

A WSGI-compliant server or gateway should document what variables it provides, along with their definitions as appropriate. Applications should check for the presence of any variables they require, and have a fallback plan in the event such a variable is absent.

Note: missing variables (such as REMOTE_USER when no authentication has occurred) should be left out of the environ dictionary. Also note that CGI-defined variables must be native strings, if they are present at all. It is a violation of this specification for any CGI variable's value to be of any type other than str.

In addition to the CGI-defined variables, the environ dictionary may also contain arbitrary operating-system "environment variables", and must contain the following WSGI-defined variables:

+-------------------+-------------------------------------------------+
| Variable          | Value                                           |
+===================+=================================================+
| wsgi.version      | The tuple (1, 0), representing WSGI version     |
|                   | 1.0.                                            |
+-------------------+-------------------------------------------------+
| wsgi.url_scheme   | A string representing the "scheme" portion of   |
|                   | the URL at which the application is being       |
|                   | invoked. Normally, this will have the value     |
|                   | "http" or "https", as appropriate.              |
+-------------------+-------------------------------------------------+
| wsgi.input        | An input stream (file-like object) from which   |
|                   | the HTTP request body bytes can be read. (The   |
|                   | server or gateway may perform reads on-demand   |
|                   | as requested by the application, or it may      |
|                   | pre-read the client's request body and buffer   |
|                   | it in-memory or on disk, or use any other       |
|                   | technique for providing such an input stream,   |
|                   | according to its preference.)                   |
+-------------------+-------------------------------------------------+
| wsgi.errors       | An output stream (file-like object) to which    |
|                   | error output can be written, for the purpose of |
|                   | recording program or other errors in a          |
|                   | standardized and possibly centralized location. |
|                   | This should be a "text mode" stream; i.e.,      |
|                   | applications should use "\n" as a line ending,  |
|                   | and assume that it will be converted to the     |
|                   | correct line ending by the server/gateway.      |
|                   |                                                 |
|                   | (On platforms where the str type is unicode,    |
|                   | the error stream should accept and log          |
|                   | arbitrary unicode without raising an error; it  |
|                   | is allowed, however, to substitute characters   |
|                   | that cannot be rendered in the stream's         |
|                   | encoding.)                                      |
|                   |                                                 |
|                   | For many servers, wsgi.errors will be the       |
|                   | server's main error log. Alternatively, this    |
|                   | may be sys.stderr, or a log file of some sort.  |
|                   | The server's documentation should include an    |
|                   | explanation of how to configure this or where   |
|                   | to find the recorded output. A server or        |
|                   | gateway may supply different error streams to   |
|                   | different applications, if this is desired.     |
+-------------------+-------------------------------------------------+
| wsgi.multithread  | This value should evaluate true if the          |
|                   | application object may be simultaneously        |
|                   | invoked by another thread in the same process,  |
|                   | and should evaluate false otherwise.            |
+-------------------+-------------------------------------------------+
| wsgi.multiprocess | This value should evaluate true if an           |
|                   | equivalent application object may be            |
|                   | simultaneously invoked by another process, and  |
|                   | should evaluate false otherwise.                |
+-------------------+-------------------------------------------------+
| wsgi.run_once     | This value should evaluate true if the server   |
|                   | or gateway expects (but does not guarantee!)    |
|                   | that the application will only be invoked this  |
|                   | one time during the life of its containing      |
|                   | process. Normally, this will only be true for a |
|                   | gateway based on CGI (or something similar).    |
+-------------------+-------------------------------------------------+

Finally, the environ dictionary may also contain server-defined variables. These variables should be named using only lower-case letters, numbers, dots, and underscores, and should be prefixed with a name that is unique to the defining server or gateway. For example, mod_python might define variables with names like mod_python.some_variable.
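A small sketch tying these variables together: a hypothetical echo application that reads the keys it needs from environ (with fallbacks for the optional ones) and reads the request body from wsgi.input without going past CONTENT_LENGTH:

    def echo_app(environ, start_response):
        method = environ["REQUEST_METHOD"]        # always present
        path = environ.get("PATH_INFO", "")       # may be empty or absent
        try:
            length = int(environ.get("CONTENT_LENGTH") or 0)
        except ValueError:
            length = 0
        # Read at most CONTENT_LENGTH bytes; the server need not stop us
        # from reading further, so the application must not try.
        body = environ["wsgi.input"].read(length) if length else b""
        data = ("%s %s (%d body bytes)\n"
                % (method, path, len(body))).encode("iso-8859-1")
        start_response("200 OK",
                       [("Content-Type", "text/plain"),
                        ("Content-Length", str(len(data)))])
        return [data]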
Input and Error Streams

The input and error streams provided by the server must support the following methods:

    Method            Stream   Notes
    ----------------- -------- -------
    read(size)        input    1
    readline()        input    1, 2
    readlines(hint)   input    1, 3
    __iter__()        input
    flush()           errors   4
    write(str)        errors
    writelines(seq)   errors

The semantics of each method are as documented in the Python Library Reference, except for these notes as listed in the table above:

1. The server is not required to read past the client's specified Content-Length, and should simulate an end-of-file condition if the application attempts to read past that point. The application should not attempt to read more data than is specified by the CONTENT_LENGTH variable. A server should allow read() to be called without an argument, and return the remainder of the client's input stream. A server should return empty bytestrings from any attempt to read from an empty or exhausted input stream.

2. Servers should support the optional "size" argument to readline(), but as in WSGI 1.0, they are allowed to omit support for it. (In WSGI 1.0, the size argument was not supported, on the grounds that it might have been complex to implement, and was not often used in practice... but then the cgi module started using it, and so practical servers had to start supporting it anyway!)

3. Note that the hint argument to readlines() is optional for both caller and implementer. The application is free not to supply it, and the server or gateway is free to ignore it.

4. Since the errors stream may not be rewound, servers and gateways are free to forward write operations immediately, without buffering. In this case, the flush() method may be a no-op. Portable applications, however, cannot assume that output is unbuffered or that flush() is a no-op. They must call flush() if they need to ensure that output has in fact been written. (For example, to minimize intermingling of data from multiple processes writing to the same error log.)

The methods listed in the table above must be supported by all servers conforming to this specification. Applications conforming to this specification must not use any other methods or attributes of the input or errors objects. In particular, applications must not attempt to close these streams, even if they possess close() methods.

The start_response() Callable

The second parameter passed to the application object is a callable of the form start_response(status, response_headers, exc_info=None). (As with all WSGI callables, the arguments must be supplied positionally, not by keyword.) The start_response callable is used to begin the HTTP response, and it must return a write(body_data) callable (see the Buffering and Streaming section, below).

The status argument is an HTTP "status" string like "200 OK" or "404 Not Found". That is, it is a string consisting of a Status-Code and a Reason-Phrase, in that order and separated by a single space, with no surrounding whitespace or other characters. (See RFC 2616, Section 6.1.1 for more information.) The string must not contain control characters, and must not be terminated with a carriage return, linefeed, or combination thereof.

The response_headers argument is a list of (header_name, header_value) tuples. It must be a Python list; i.e. type(response_headers) is ListType, and the server may change its contents in any way it desires. Each header_name must be a valid HTTP header field-name (as defined by RFC 2616, Section 4.2), without a trailing colon or other punctuation.
Each header_value must not include any control characters, including carriage returns or linefeeds, either embedded or at the end. (These requirements are to minimize the complexity of any parsing that must be performed by servers, gateways, and intermediate response processors that need to inspect or modify response headers.)

In general, the server or gateway is responsible for ensuring that correct headers are sent to the client: if the application omits a header required by HTTP (or other relevant specifications that are in effect), the server or gateway must add it. For example, the HTTP Date: and Server: headers would normally be supplied by the server or gateway.

(A reminder for server/gateway authors: HTTP header names are case-insensitive, so be sure to take that into consideration when examining application-supplied headers!)

Applications and middleware are forbidden from using HTTP/1.1 "hop-by-hop" features or headers, any equivalent features in HTTP/1.0, or any headers that would affect the persistence of the client's connection to the web server. These features are the exclusive province of the actual web server, and a server or gateway should consider it a fatal error for an application to attempt sending them, and raise an error if they are supplied to start_response(). (For more specifics on "hop-by-hop" features and headers, please see the Other HTTP Features section below.)

Servers should check for errors in the headers at the time start_response is called, so that an error can be raised while the application is still running.

However, the start_response callable must not actually transmit the response headers. Instead, it must store them for the server or gateway to transmit only after the first iteration of the application return value that yields a non-empty bytestring, or upon the application's first invocation of the write() callable. In other words, response headers must not be sent until there is actual body data available, or until the application's returned iterable is exhausted. (The only possible exception to this rule is if the response headers explicitly include a Content-Length of zero.)

This delaying of response header transmission is to ensure that buffered and asynchronous applications can replace their originally intended output with error output, up until the last possible moment. For example, the application may need to change the response status from "200 OK" to "500 Internal Error", if an error occurs while the body is being generated within an application buffer.

The exc_info argument, if supplied, must be a Python sys.exc_info() tuple. This argument should be supplied by the application only if start_response is being called by an error handler. If exc_info is supplied, and no HTTP headers have been output yet, start_response should replace the currently-stored HTTP response headers with the newly-supplied ones, thus allowing the application to "change its mind" about the output when an error has occurred.

However, if exc_info is provided, and the HTTP headers have already been sent, start_response must raise an error, and should re-raise using the exc_info tuple. That is:

    raise exc_info[1].with_traceback(exc_info[2])

This will re-raise the exception trapped by the application, and in principle should abort the application. (It is not safe for the application to attempt error output to the browser once the HTTP headers have already been sent.)
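The deferred transmission and exc_info rules above can be pictured from the server's side with a small sketch. This is not a normative implementation; the transport_write hook and the HTTP/1.1 framing are assumptions made for illustration:

    class ResponseState:
        """Sketch of a gateway's per-response bookkeeping."""

        def __init__(self, transport_write):
            self.transport_write = transport_write  # sends raw bytes to the client
            self.pending = None          # stored (status, headers), not yet sent
            self.headers_sent = False

        def start_response(self, status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if self.headers_sent:
                        # Headers are already on the wire: re-raise and
                        # thereby abort the application.
                        raise exc_info[1].with_traceback(exc_info[2])
                finally:
                    exc_info = None      # avoid a circular reference
            elif self.pending is not None:
                raise AssertionError("start_response() called twice "
                                     "without exc_info")
            # Store the headers; they go out only when body data appears.
            self.pending = (status, response_headers)
            return self.write

        def write(self, data):
            if data and not self.headers_sent:
                self._send_headers()
            self.transport_write(data)

        def _send_headers(self):
            status, headers = self.pending
            head = 'HTTP/1.1 %s\r\n' % status
            head += ''.join('%s: %s\r\n' % kv for kv in headers)
            self.transport_write((head + '\r\n').encode('iso-8859-1'))
            self.headers_sent = True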
The application must not trap any exceptions raised by start_response, if it called start_response with exc_info. Instead, it should allow such exceptions to propagate back to the server or gateway. See Error Handling below, for more details.

The application may call start_response more than once, if and only if the exc_info argument is provided. More precisely, it is a fatal error to call start_response without the exc_info argument if start_response has already been called within the current invocation of the application. This includes the case where the first call to start_response raised an error. (See the example CGI gateway above for an illustration of the correct logic.)

Note: servers, gateways, or middleware implementing start_response should ensure that no reference is held to the exc_info parameter beyond the duration of the function's execution, to avoid creating a circular reference through the traceback and frames involved. The simplest way to do this is something like:

    def start_response(status, response_headers, exc_info=None):
        if exc_info:
            try:
                # do stuff w/exc_info here
            finally:
                exc_info = None    # Avoid circular ref.

The example CGI gateway provides another illustration of this technique.

Handling the Content-Length Header

If the application supplies a Content-Length header, the server should not transmit more bytes to the client than the header allows, and should stop iterating over the response when enough data has been sent, or raise an error if the application tries to write() past that point. (Of course, if the application does not provide enough data to meet its stated Content-Length, the server should close the connection and log or otherwise report the error.)

If the application does not supply a Content-Length header, a server or gateway may choose one of several approaches to handling it. The simplest of these is to close the client connection when the response is completed.

Under some circumstances, however, the server or gateway may be able to either generate a Content-Length header, or at least avoid the need to close the client connection. If the application does not call the write() callable, and returns an iterable whose len() is 1, then the server can automatically determine Content-Length by taking the length of the first bytestring yielded by the iterable.

And, if the server and client both support HTTP/1.1 "chunked encoding" (RFC 2616, Section 3.6.1), then the server may use chunked encoding to send a chunk for each write() call or bytestring yielded by the iterable, thus generating a Content-Length header for each chunk. This allows the server to keep the client connection alive, if it wishes to do so. Note that the server must comply fully with RFC 2616 when doing this, or else fall back to one of the other strategies for dealing with the absence of Content-Length.

(Note: applications and middleware must not apply any kind of Transfer-Encoding to their output, such as chunking or gzipping; as "hop-by-hop" operations, these encodings are the province of the actual web server/gateway. See Other HTTP Features below, for more details.)

Buffering and Streaming

Generally speaking, applications will achieve the best throughput by buffering their (modestly-sized) output and sending it all at once. This is a common approach in existing frameworks such as Zope: the output is buffered in a StringIO or similar object, then transmitted all at once, along with the response headers.
The corresponding approach in WSGI is for the application to simply return a single-element iterable (such as a list) containing the response body as a single bytestring. This is the recommended approach for the vast majority of application functions, that render HTML pages whose text easily fits in memory.

For large files, however, or for specialized uses of HTTP streaming (such as multipart "server push"), an application may need to provide output in smaller blocks (e.g. to avoid loading a large file into memory). It's also sometimes the case that part of a response may be time-consuming to produce, but it would be useful to send ahead the portion of the response that precedes it.

In these cases, applications will usually return an iterator (often a generator-iterator) that produces the output in a block-by-block fashion. These blocks may be broken to coincide with multipart boundaries (for "server push"), or just before time-consuming tasks (such as reading another block of an on-disk file).

WSGI servers, gateways, and middleware must not delay the transmission of any block; they must either fully transmit the block to the client, or guarantee that they will continue transmission even while the application is producing its next block. A server/gateway or middleware may provide this guarantee in one of three ways:

1. Send the entire block to the operating system (and request that any O/S buffers be flushed) before returning control to the application, OR

2. Use a different thread to ensure that the block continues to be transmitted while the application produces the next block, OR

3. (Middleware only) send the entire block to its parent gateway/server.

By providing this guarantee, WSGI allows applications to ensure that transmission will not become stalled at an arbitrary point in their output data. This is critical for proper functioning of e.g. multipart "server push" streaming, where data between multipart boundaries should be transmitted in full to the client.

Middleware Handling of Block Boundaries

In order to better support asynchronous applications and servers, middleware components must not block iteration waiting for multiple values from an application iterable. If the middleware needs to accumulate more data from the application before it can produce any output, it must yield an empty bytestring.

To put this requirement another way, a middleware component must yield at least one value each time its underlying application yields a value. If the middleware cannot yield any other value, it must yield an empty bytestring.

This requirement ensures that asynchronous applications and servers can conspire to reduce the number of threads that are required to run a given number of application instances simultaneously.

Note also that this requirement means that middleware must return an iterable as soon as its underlying application returns an iterable. It is also forbidden for middleware to use the write() callable to transmit data that is yielded by an underlying application. Middleware may only use their parent server's write() callable to transmit data that the underlying application sent using a middleware-provided write() callable.
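As a sketch of the block-boundary rule just described, here is a hypothetical middleware that accumulates the application's entire output (say, in order to rewrite it) while still yielding one value for every value the application produces:

    def buffering_middleware(app):
        """Accumulate output without blocking the iteration protocol.

        The pass-through of start_response and the buffer-then-join
        behavior are illustrative assumptions; a real middleware would
        also need to adjust any Content-Length header it invalidates.
        """
        def wrapper(environ, start_response):
            result = app(environ, start_response)

            def respond():
                chunks = []
                try:
                    for data in result:
                        chunks.append(data)
                        yield b''              # one value out per value in
                    yield b''.join(chunks)     # the application is exhausted
                finally:
                    if hasattr(result, 'close'):
                        result.close()

            return respond()
        return wrapper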
The write() Callable

Some existing application framework APIs support unbuffered output in a different manner than WSGI. Specifically, they provide a "write" function or method of some kind to write an unbuffered block of data, or else they provide a buffered "write" function and a "flush" mechanism to flush the buffer.

Unfortunately, such APIs cannot be implemented in terms of WSGI's "iterable" application return value, unless threads or other special mechanisms are used. Therefore, to allow these frameworks to continue using an imperative API, WSGI includes a special write() callable, returned by the start_response callable.

New WSGI applications and frameworks should not use the write() callable if it is possible to avoid doing so. The write() callable is strictly a hack to support imperative streaming APIs. In general, applications should produce their output via their returned iterable, as this makes it possible for web servers to interleave other tasks in the same Python thread, potentially providing better throughput for the server as a whole.

The write() callable is returned by the start_response() callable, and it accepts a single parameter: a bytestring to be written as part of the HTTP response body, that is treated exactly as though it had been yielded by the output iterable. In other words, before write() returns, it must guarantee that the passed-in bytestring was either completely sent to the client, or that it is buffered for transmission while the application proceeds onward.

An application must return an iterable object, even if it uses write() to produce all or part of its response body. The returned iterable may be empty (i.e. yield no non-empty bytestrings), but if it does yield non-empty bytestrings, that output must be treated normally by the server or gateway (i.e., it must be sent or queued immediately). Applications must not invoke write() from within their return iterable, and therefore any bytestrings yielded by the iterable are transmitted after all bytestrings passed to write() have been sent to the client.

Unicode Issues

HTTP does not directly support Unicode, and neither does this interface. All encoding/decoding must be handled by the application; all strings passed to or from the server must be of type str or bytes, never unicode. The result of using a unicode object where a string object is required is undefined.

Note also that strings passed to start_response() as a status or as response headers must follow RFC 2616 with respect to encoding. That is, they must either be ISO-8859-1 characters, or use RFC 2047 MIME encoding.

On Python platforms where the str or StringType type is in fact Unicode-based (e.g. Jython, IronPython, Python 3, etc.), all "strings" referred to in this specification must contain only code points representable in ISO-8859-1 encoding (\u0000 through \u00FF, inclusive). It is a fatal error for an application to supply strings containing any other Unicode character or code point. Similarly, servers and gateways must not supply strings to an application containing any other Unicode characters.

Again, all objects referred to in this specification as "strings" must be of type str or StringType, and must not be of type unicode or UnicodeType. And, even if a given platform allows for more than 8 bits per character in str/StringType objects, only the lower 8 bits may be used, for any value referred to in this specification as a "string".

For values referred to in this specification as "bytestrings" (i.e., values read from wsgi.input, passed to write() or yielded by the application), the value must be of type bytes under Python 3, and str in earlier versions of Python.
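The string rules above can be made concrete with a short, hedged illustration (Python 3, where str is Unicode-based):

    # Native strings: type str, restricted to ISO-8859-1 code points.
    status = '200 OK'
    headers = [('Content-Type', 'text/plain; charset=utf-8')]

    # Bytestrings: what the application actually yields as the body.
    body = 'héllo wörld'.encode('utf-8')

    # A conforming check on header values (all code points <= U+00FF):
    assert all(ord(ch) <= 0xff for _, value in headers for ch in value)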
Error Handling

In general, applications should try to trap their own, internal errors, and display a helpful message in the browser. (It is up to the application to decide what "helpful" means in this context.)

However, to display such a message, the application must not have actually sent any data to the browser yet, or else it risks corrupting the response. WSGI therefore provides a mechanism to either allow the application to send its error message, or be automatically aborted: the exc_info argument to start_response. Here is an example of its use:

    try:
        # regular application code here
        status = "200 Froody"
        response_headers = [("content-type", "text/plain")]
        start_response(status, response_headers)
        return ["normal body goes here"]
    except:
        # XXX should trap runtime issues like MemoryError, KeyboardInterrupt
        #     in a separate handler before this bare 'except:'...
        status = "500 Oops"
        response_headers = [("content-type", "text/plain")]
        start_response(status, response_headers, sys.exc_info())
        return ["error body goes here"]

If no output has been written when an exception occurs, the call to start_response will return normally, and the application will return an error body to be sent to the browser. However, if any output has already been sent to the browser, start_response will reraise the provided exception. This exception should not be trapped by the application, and so the application will abort. The server or gateway can then trap this (fatal) exception and abort the response.

Servers should trap and log any exception that aborts an application or the iteration of its return value. If a partial response has already been written to the browser when an application error occurs, the server or gateway may attempt to add an error message to the output, if the already-sent headers indicate a text/* content type that the server knows how to modify cleanly.

Some middleware may wish to provide additional exception handling services, or intercept and replace application error messages. In such cases, middleware may choose to not re-raise the exc_info supplied to start_response, but instead raise a middleware-specific exception, or simply return without an exception after storing the supplied arguments. This will then cause the application to return its error body iterable (or invoke write()), allowing the middleware to capture and modify the error output. These techniques will work as long as application authors:

1. Always provide exc_info when beginning an error response

2. Never trap errors raised by start_response when exc_info is being provided

HTTP 1.1 Expect/Continue

Servers and gateways that implement HTTP 1.1 must provide transparent support for HTTP 1.1's "expect/continue" mechanism. This may be done in any of several ways:

1. Respond to requests containing an Expect: 100-continue request with an immediate "100 Continue" response, and proceed normally.

2. Proceed with the request normally, but provide the application with a wsgi.input stream that will send the "100 Continue" response if/when the application first attempts to read from the input stream. The read request must then remain blocked until the client responds.

3. Wait until the client decides that the server does not support expect/continue, and sends the request body on its own. (This is suboptimal, and is not recommended.)

Note that these behavior restrictions do not apply for HTTP 1.0 requests, or for requests that are not directed to an application object. For more information on HTTP 1.1 Expect/Continue, see RFC 2616, sections 8.2.3 and 10.1.1.
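Strategy 2 above can be sketched as a lazy wrapper around the request body stream. The raw_stream and send_100 hooks are hypothetical names for whatever the gateway uses internally:

    class ExpectContinueInput:
        """Send "100 Continue" on the first read from wsgi.input (a sketch)."""

        def __init__(self, raw_stream, send_100):
            self.raw = raw_stream      # the real input stream
            self.send_100 = send_100   # emits b"HTTP/1.1 100 Continue\r\n\r\n"
            self.continued = False

        def _maybe_continue(self):
            if not self.continued:
                self.continued = True
                self.send_100()

        def read(self, size=-1):
            self._maybe_continue()
            return self.raw.read(size)

        def readline(self, size=-1):
            self._maybe_continue()
            return self.raw.readline(size)

        def readlines(self, hint=-1):
            self._maybe_continue()
            return self.raw.readlines(hint)

        def __iter__(self):
            self._maybe_continue()
            return iter(self.raw)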
Other HTTP Features

In general, servers and gateways should "play dumb" and allow the application complete control over its output. They should only make changes that do not alter the effective semantics of the application's response. It is always possible for the application developer to add middleware components to supply additional features, so server/gateway developers should be conservative in their implementation. In a sense, a server should consider itself to be like an HTTP "gateway server", with the application being an HTTP "origin server". (See RFC 2616, section 1.3, for the definition of these terms.)

However, because WSGI servers and applications do not communicate via HTTP, what RFC 2616 calls "hop-by-hop" headers do not apply to WSGI internal communications. WSGI applications must not generate any "hop-by-hop" headers (RFC 2616, Section 13.5.1), attempt to use HTTP features that would require them to generate such headers, or rely on the content of any incoming "hop-by-hop" headers in the environ dictionary. WSGI servers must handle any supported inbound "hop-by-hop" headers on their own, such as by decoding any inbound Transfer-Encoding, including chunked encoding if applicable.

Applying these principles to a variety of HTTP features, it should be clear that a server may handle cache validation via the If-None-Match and If-Modified-Since request headers and the Last-Modified and ETag response headers. However, it is not required to do this, and the application should perform its own cache validation if it wants to support that feature, since the server/gateway is not required to do such validation.

Similarly, a server may re-encode or transport-encode an application's response, but the application should use a suitable content encoding on its own, and must not apply a transport encoding. A server may transmit byte ranges of the application's response if requested by the client, and the application doesn't natively support byte ranges. Again, however, the application should perform this function on its own if desired.

Note that these restrictions on applications do not necessarily mean that every application must reimplement every HTTP feature; many HTTP features can be partially or fully implemented by middleware components, thus freeing both server and application authors from implementing the same features over and over again.

Thread Support

Thread support, or lack thereof, is also server-dependent. Servers that can run multiple requests in parallel should also provide the option of running an application in a single-threaded fashion, so that applications or frameworks that are not thread-safe may still be used with that server.

Implementation/Application Notes

Server Extension APIs

Some server authors may wish to expose more advanced APIs, that application or framework authors can use for specialized purposes. For example, a gateway based on mod_python might wish to expose part of the Apache API as a WSGI extension.

In the simplest case, this requires nothing more than defining an environ variable, such as mod_python.some_api. But, in many cases, the possible presence of middleware can make this difficult. For example, an API that offers access to the same HTTP headers that are found in environ variables, might return different data if environ has been modified by middleware.

In general, any extension API that duplicates, supplants, or bypasses some portion of WSGI functionality runs the risk of being incompatible with middleware components.
Server/gateway developers should not assume that nobody will use middleware, because some framework developers specifically intend to organize or reorganize their frameworks to function almost entirely as middleware of various kinds.

So, to provide maximum compatibility, servers and gateways that provide extension APIs that replace some WSGI functionality must design those APIs so that they are invoked using the portion of the API that they replace. For example, an extension API to access HTTP request headers must require the application to pass in its current environ, so that the server/gateway may verify that HTTP headers accessible via the API have not been altered by middleware. If the extension API cannot guarantee that it will always agree with environ about the contents of HTTP headers, it must refuse service to the application, e.g. by raising an error, returning None instead of a header collection, or whatever is appropriate to the API.

Similarly, if an extension API provides an alternate means of writing response data or headers, it should require the start_response callable to be passed in, before the application can obtain the extended service. If the object passed in is not the same one that the server/gateway originally supplied to the application, it cannot guarantee correct operation and must refuse to provide the extended service to the application.

These guidelines also apply to middleware that adds information such as parsed cookies, form variables, sessions, and the like to environ. Specifically, such middleware should provide these features as functions which operate on environ, rather than simply stuffing values into environ. This helps ensure that information is calculated from environ after any middleware has done any URL rewrites or other environ modifications.

It is very important that these "safe extension" rules be followed by both server/gateway and middleware developers, in order to avoid a future in which middleware developers are forced to delete any and all extension APIs from environ, to ensure that their mediation isn't being bypassed by applications using those extensions!

Application Configuration

This specification does not define how a server selects or obtains an application to invoke. These and other configuration options are highly server-specific matters. It is expected that server/gateway authors will document how to configure the server to execute a particular application object, and with what options (such as threading options).

Framework authors, on the other hand, should document how to create an application object that wraps their framework's functionality. The user, who has chosen both the server and the application framework, must connect the two together. However, since both the framework and the server now have a common interface, this should be merely a mechanical matter, rather than a significant engineering effort for each new server/framework pair.

Finally, some applications, frameworks, and middleware may wish to use the environ dictionary to receive simple string configuration options. Servers and gateways should support this by allowing an application's deployer to specify name-value pairs to be placed in environ.
In the simplest case, this support can consist merely of copying all operating system-supplied environment variables from os.environ into the environ dictionary, since the deployer in principle can configure these externally to the server, or in the CGI case they may be able to be set via the server's configuration files.

Applications should try to keep such required variables to a minimum, since not all servers will support easy configuration of them. Of course, even in the worst case, persons deploying an application can create a script to supply the necessary configuration values:

    from the_app import application

    def new_app(environ, start_response):
        environ['the_app.configval1'] = 'something'
        return application(environ, start_response)

But, most existing applications and frameworks will probably only need a single configuration value from environ, to indicate the location of their application or framework-specific configuration file(s). (Of course, applications should cache such configuration, to avoid having to re-read it upon each invocation.)

URL Reconstruction

If an application wishes to reconstruct a request's complete URL, it may do so using the following algorithm, contributed by Ian Bicking:

    from urllib.parse import quote

    url = environ['wsgi.url_scheme']+'://'

    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']

        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME', ''))
    url += quote(environ.get('PATH_INFO', ''))
    if environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']

Note that such a reconstructed URL may not be precisely the same URI as requested by the client. Server rewrite rules, for example, may have modified the client's originally requested URL to place it in a canonical form.

Supporting Older (<2.2) Versions of Python

Some servers, gateways, or applications may wish to support older (<2.2) versions of Python. This is especially important if Jython is a target platform, since as of this writing a production-ready version of Jython 2.2 is not yet available.

For servers and gateways, this is relatively straightforward: servers and gateways targeting pre-2.2 versions of Python must simply restrict themselves to using only a standard "for" loop to iterate over any iterable returned by an application. This is the only way to ensure source-level compatibility with both the pre-2.2 iterator protocol (discussed further below) and "today's" iterator protocol (see PEP 234).

(Note that this technique necessarily applies only to servers, gateways, or middleware that are written in Python. Discussion of how to use iterator protocol(s) correctly from other languages is outside the scope of this PEP.)

For applications, supporting pre-2.2 versions of Python is slightly more complex:

- You may not return a file object and expect it to work as an iterable, since before Python 2.2, files were not iterable. (In general, you shouldn't do this anyway, because it will perform quite poorly most of the time!) Use wsgi.file_wrapper or an application-specific file wrapper class. (See Optional Platform-Specific File Handling for more on wsgi.file_wrapper, and an example class you can use to wrap a file as an iterable.)

- If you return a custom iterable, it must implement the pre-2.2 iterator protocol.
That is, provide a __getitem__ method that accepts an integer key, and raises IndexError when exhausted. (Note that built-in sequence types are also acceptable, since they also implement this protocol.)

Finally, middleware that wishes to support pre-2.2 versions of Python, and iterates over application return values or itself returns an iterable (or both), must follow the appropriate recommendations above.

(Note: It should go without saying that to support pre-2.2 versions of Python, any server, gateway, application, or middleware must also use only language features available in the target version, use 1 and 0 instead of True and False, etc.)

Optional Platform-Specific File Handling

Some operating environments provide special high-performance file-transmission facilities, such as the Unix sendfile() call. Servers and gateways may expose this functionality via an optional wsgi.file_wrapper key in the environ. An application may use this "file wrapper" to convert a file or file-like object into an iterable that it then returns, e.g.:

    if 'wsgi.file_wrapper' in environ:
        return environ['wsgi.file_wrapper'](filelike, block_size)
    else:
        return iter(lambda: filelike.read(block_size), '')

If the server or gateway supplies wsgi.file_wrapper, it must be a callable that accepts one required positional parameter, and one optional positional parameter. The first parameter is the file-like object to be sent, and the second parameter is an optional block size "suggestion" (which the server/gateway need not use). The callable must return an iterable object, and must not perform any data transmission until and unless the server/gateway actually receives the iterable as a return value from the application. (To do otherwise would prevent middleware from being able to interpret or override the response data.)

To be considered "file-like", the object supplied by the application must have a read() method that takes an optional size argument. It may have a close() method, and if so, the iterable returned by wsgi.file_wrapper must have a close() method that invokes the original file-like object's close() method. If the "file-like" object has any other methods or attributes with names matching those of Python built-in file objects (e.g. fileno()), the wsgi.file_wrapper may assume that these methods or attributes have the same semantics as those of a built-in file object.

The actual implementation of any platform-specific file handling must occur after the application returns, and the server or gateway checks to see if a wrapper object was returned. (Again, because of the presence of middleware, error handlers, and the like, it is not guaranteed that any wrapper created will actually be used.)

Apart from the handling of close(), the semantics of returning a file wrapper from the application should be the same as if the application had returned iter(filelike.read, ''). In other words, transmission should begin at the current position within the "file" at the time that transmission begins, and continue until the end is reached, or until Content-Length bytes have been written. (If the application doesn't supply a Content-Length, the server may generate one from the file using its knowledge of the underlying file implementation.)

Of course, platform-specific file transmission APIs don't usually accept arbitrary "file-like" objects.
Therefore, a wsgi.file_wrapper has to introspect the supplied object for things such as a fileno() (Unix-like OSes) or a java.nio.FileChannel (under Jython) in order to determine if the file-like object is suitable for use with the platform-specific API it supports.

Note that even if the object is not suitable for the platform API, the wsgi.file_wrapper must still return an iterable that wraps read() and close(), so that applications using file wrappers are portable across platforms. Here's a simple platform-agnostic file wrapper class, suitable for old (pre 2.2) and new Pythons alike:

    class FileWrapper:

        def __init__(self, filelike, blksize=8192):
            self.filelike = filelike
            self.blksize = blksize
            if hasattr(filelike, 'close'):
                self.close = filelike.close

        def __getitem__(self, key):
            data = self.filelike.read(self.blksize)
            if data:
                return data
            raise IndexError

and here is a snippet from a server/gateway that uses it to provide access to a platform-specific API:

    environ['wsgi.file_wrapper'] = FileWrapper
    result = application(environ, start_response)

    try:
        if isinstance(result, FileWrapper):
            # check if result.filelike is usable w/platform-specific
            # API, and if so, use that API to transmit the result.
            # If not, fall through to normal iterable handling
            # loop below.

        for data in result:
            # etc.

    finally:
        if hasattr(result, 'close'):
            result.close()

Questions and Answers

1. Why must environ be a dictionary? What's wrong with using a subclass?

   The rationale for requiring a dictionary is to maximize portability between servers. The alternative would be to define some subset of a dictionary's methods as being the standard and portable interface. In practice, however, most servers will probably find a dictionary adequate to their needs, and thus framework authors will come to expect the full set of dictionary features to be available, since they will be there more often than not. But, if some server chooses not to use a dictionary, then there will be interoperability problems despite that server's "conformance" to spec. Therefore, making a dictionary mandatory simplifies the specification and guarantees interoperability.

   Note that this does not prevent server or framework developers from offering specialized services as custom variables inside the environ dictionary. This is the recommended approach for offering any such value-added services.

2. Why can you call write() and yield bytestrings/return an iterable? Shouldn't we pick just one way?

   If we supported only the iteration approach, then current frameworks that assume the availability of "push" suffer. But, if we only support pushing via write(), then server performance suffers for transmission of e.g. large files (if a worker thread can't begin work on a new request until all of the output has been sent). Thus, this compromise allows an application framework to support both approaches, as appropriate, but with only a little more burden to the server implementor than a push-only approach would require.

3. What's the close() for?

   When writes are done during the execution of an application object, the application can ensure that resources are released using a try/finally block. But, if the application returns an iterable, any resources used will not be released until the iterable is garbage collected. The close() idiom allows an application to release critical resources at the end of a request, and it's forward-compatible with the support for try/finally in generators that's proposed by PEP 325.

4. Why is this interface so low-level? I want feature X!
   (e.g. cookies, sessions, persistence, ...)

   This isn't Yet Another Python Web Framework. It's just a way for frameworks to talk to web servers, and vice versa. If you want these features, you need to pick a web framework that provides the features you want. And if that framework lets you create a WSGI application, you should be able to run it in most WSGI-supporting servers. Also, some WSGI servers may offer additional services via objects provided in their environ dictionary; see the applicable server documentation for details. (Of course, applications that use such extensions will not be portable to other WSGI-based servers.)

5. Why use CGI variables instead of good old HTTP headers? And why mix them in with WSGI-defined variables?

   Many existing web frameworks are built heavily upon the CGI spec, and existing web servers know how to generate CGI variables. In contrast, alternative ways of representing inbound HTTP information are fragmented and lack market share. Thus, using the CGI "standard" seems like a good way to leverage existing implementations. As for mixing them with WSGI variables, separating them would just require two dictionary arguments to be passed around, while providing no real benefits.

6. What about the status string? Can't we just use the number, passing in 200 instead of "200 OK"?

   Doing this would complicate the server or gateway, by requiring them to have a table of numeric statuses and corresponding messages. By contrast, it is easy for an application or framework author to type the extra text to go with the specific response code they are using, and existing frameworks often already have a table containing the needed messages. So, on balance it seems better to make the application/framework responsible, rather than the server or gateway.

7. Why is wsgi.run_once not guaranteed to run the app only once?

   Because it's merely a suggestion to the application that it should "rig for infrequent running". This is intended for application frameworks that have multiple modes of operation for caching, sessions, and so forth. In a "multiple run" mode, such frameworks may preload caches, and may not write e.g. logs or session data to disk after each request. In "single run" mode, such frameworks avoid preloading and flush all necessary writes after each request.

   However, in order to test an application or framework to verify correct operation in the latter mode, it may be necessary (or at least expedient) to invoke it more than once. Therefore, an application should not assume that it will definitely not be run again, just because it is called with wsgi.run_once set to True.

8. Features like X (dictionaries, callables, etc.) are ugly for use in application code; why don't we use objects instead?

   All of these implementation choices of WSGI are specifically intended to decouple features from one another; recombining these features into encapsulated objects makes it somewhat harder to write servers or gateways, and an order of magnitude harder to write middleware that replaces or modifies only small portions of the overall functionality.

   In essence, middleware wants to have a "Chain of Responsibility" pattern, whereby it can act as a "handler" for some functions, while allowing others to remain unchanged. This is difficult to do with ordinary Python objects, if the interface is to remain extensible. For example, one must use __getattr__ or __getattribute__ overrides, to ensure that extensions (such as attributes defined by future WSGI versions) are passed through.
This type of code is notoriously difficult to get 100% correct, and few people will want to write it themselves. They will therefore copy other people's implementations, but fail to update them when the person they copied from corrects yet another corner case.

Further, this necessary boilerplate would be pure excise, a developer tax paid by middleware developers to support a slightly prettier API for application framework developers. But, application framework developers will typically only be updating one framework to support WSGI, and in a very limited part of their framework as a whole. It will likely be their first (and maybe their only) WSGI implementation, and thus they will likely implement with this specification ready to hand. Thus, the effort of making the API "prettier" with object attributes and suchlike would likely be wasted for this audience.

We encourage those who want a prettier (or otherwise improved) WSGI interface for use in direct web application programming (as opposed to web framework development) to develop APIs or frameworks that wrap WSGI for convenient use by application developers. In this way, WSGI can remain conveniently low-level for server and middleware authors, while not being "ugly" for application developers.

Proposed/Under Discussion

These items are currently being discussed on the Web-SIG and elsewhere, or are on the PEP author's "to-do" list:

- Should wsgi.input be an iterator instead of a file? This would help for asynchronous applications and chunked-encoding input streams.

- Optional extensions are being discussed for pausing iteration of an application's output until input is available or until a callback occurs.

- Add a section about synchronous vs. asynchronous apps and servers, the relevant threading models, and issues/design goals in these areas.

Acknowledgements

Thanks go to the many folks on the Web-SIG mailing list whose thoughtful feedback made this revised draft possible. Especially:

- Gregory "Grisha" Trubetskoy, author of mod_python, who beat up on the first draft as not offering any advantages over "plain old CGI", thus encouraging me to look for a better approach.

- Ian Bicking, who helped nag me into properly specifying the multithreading and multiprocess options, as well as badgering me to provide a mechanism for servers to supply custom extension data to an application.

- Tony Lownds, who came up with the concept of a start_response function that took the status and headers, returning a write function. His input also guided the design of the exception handling facilities, especially in the area of allowing for middleware that overrides application error messages.

- Alan Kennedy, whose courageous attempts to implement WSGI-on-Jython (well before the spec was finalized) helped to shape the "supporting older versions of Python" section, as well as the optional wsgi.file_wrapper facility, and some of the early bytes/unicode decisions.

- Mark Nottingham, who reviewed the spec extensively for issues with HTTP RFC compliance, especially with regard to HTTP/1.1 features that I didn't even know existed until he pointed them out.

- Graham Dumpleton, who worked tirelessly (even in the face of my laziness and stupidity) to get some sort of Python 3 version of WSGI out, who proposed the "native strings" vs. "byte strings" concept, and thoughtfully wrestled through a great many HTTP, wsgi.input, and other amendments. Most, if not all, of the credit for this new PEP belongs to him.
References

[1] Procedural issues regarding modifications to PEP 333 (https://mail.python.org/pipermail/python-dev/2010-September/104114.html)

[2] SVN revision history for PEP 3333, showing differences from PEP 333 (http://svn.python.org/view/peps/trunk/pep-3333.txt?r1=84854&r2=HEAD)

[3] The Python Wiki "Web Programming" topic (https://wiki.python.org/moin/WebProgramming)

[4] The Common Gateway Interface Specification, v 1.1, 3rd Draft (https://datatracker.ietf.org/doc/html/draft-coar-cgi-v11-03)

[5] mod_ssl Reference, "Environment Variables" (http://www.modssl.org/docs/2.8/ssl_reference.html#ToC25)

Copyright

This document has been placed in the public domain.
PEP: 293
Title: Codec Error Handling Callbacks
Version: $Revision$
Last-Modified: $Date$
Author: Walter Dörwald <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Jun-2002
Python-Version: 2.3
Post-History: 19-Jun-2002

Abstract

This PEP aims at extending Python's fixed codec error handling schemes with a more flexible callback based approach. Python currently uses a fixed error handling scheme for codec errors. This PEP describes a mechanism which allows Python to use function callbacks as error handlers. With these more flexible error handlers it is possible to add new functionality to existing codecs by e.g. providing fallback solutions or different encodings for cases where the standard codec mapping does not apply.

Specification

Currently the set of codec error handling algorithms is fixed to either "strict", "replace" or "ignore" and the semantics of these algorithms is implemented separately for each codec. The proposed patch will make the set of error handling algorithms extensible through a codec error handler registry which maps handler names to handler functions. This registry consists of the following two C functions:

    int PyCodec_RegisterError(const char *name, PyObject *error)

    PyObject *PyCodec_LookupError(const char *name)

and their Python counterparts:

    codecs.register_error(name, error)
    codecs.lookup_error(name)

PyCodec_LookupError raises a LookupError if no callback function has been registered under this name. Similar to the encoding name registry, there is no way of unregistering callback functions or iterating through the available functions.

The callback functions will be used in the following way by the codecs: when the codec encounters an encoding/decoding error, the callback function is looked up by name, the information about the error is stored in an exception object and the callback is called with this object. The callback returns information about how to proceed (or raises an exception).

For encoding, the exception object will look like this:

    class UnicodeEncodeError(UnicodeError):
        def __init__(self, encoding, object, start, end, reason):
            UnicodeError.__init__(self,
                ("encoding '%s' can't encode characters " +
                 "in positions %d-%d: %s") % (encoding, start, end-1, reason))
            self.encoding = encoding
            self.object = object
            self.start = start
            self.end = end
            self.reason = reason

This type will be implemented in C with the appropriate setter and getter methods for the attributes, which have the following meaning:

- encoding: The name of the encoding;
- object: The original unicode object for which encode() has been called;
- start: The position of the first unencodable character;
- end: (The position of the last unencodable character)+1 (or the length of object, if all characters from start to the end of object are unencodable);
- reason: The reason why object[start:end] couldn't be encoded.

If object has consecutive unencodable characters, the encoder should collect those characters for one call to the callback, if those characters can't be encoded for the same reason. The encoder is not required to implement this behaviour and may instead call the callback for every single character, but it is strongly suggested that the collecting method is implemented.

The callback must not modify the exception object.
If the callback does not raise an exception (either the one passed in, or a different one), it must return a tuple:

    (replacement, newpos)

replacement is a unicode object that the encoder will encode and emit instead of the unencodable object[start:end] part; newpos specifies a new position within object, where (after encoding the replacement) the encoder will continue encoding. Negative values for newpos are treated as being relative to the end of object. If newpos is out of bounds the encoder will raise an IndexError.

If the replacement string itself contains an unencodable character, the encoder raises the exception object (but may set a different reason string before raising). Should further encoding errors occur, the encoder is allowed to reuse the exception object for the next call to the callback. Furthermore, the encoder is allowed to cache the result of codecs.lookup_error.

If the callback does not know how to handle the exception, it must raise a TypeError.

Decoding works similarly to encoding, with the following differences:

- The exception class is named UnicodeDecodeError and the attribute object is the original 8bit string that the decoder is currently decoding.

- The decoder will call the callback with those bytes that constitute one undecodable sequence, even if there is more than one undecodable sequence that is undecodable for the same reason directly after the first one. E.g. for the "unicode-escape" encoding, when decoding the illegal string \\u00\\u01x, the callback will be called twice (once for \\u00 and once for \\u01). This is done to be able to generate the correct number of replacement characters.

- The replacement returned from the callback is a unicode object that will be emitted by the decoder as-is without further processing, instead of the undecodable object[start:end] part.

There is a third API that uses the old strict/ignore/replace error handling scheme:

    PyUnicode_TranslateCharmap/unicode.translate

The proposed patch will enhance PyUnicode_TranslateCharmap, so that it also supports the callback registry. This has the additional side effect that PyUnicode_TranslateCharmap will support multi-character replacement strings (see SF feature request #403100[1]).

For PyUnicode_TranslateCharmap the exception class will be named UnicodeTranslateError. PyUnicode_TranslateCharmap will collect all consecutive untranslatable characters (i.e. those that map to None) and call the callback with them. The replacement returned from the callback is a unicode object that will be put in the translated result as-is, without further processing.

All encoders and decoders are allowed to implement the callback functionality themselves, if they recognize the callback name (i.e. if it is a system callback like "strict", "replace" and "ignore"). The proposed patch will add two additional system callback names: "backslashreplace" and "xmlcharrefreplace", which can be used for encoding and translating and which will also be implemented in-place for all encoders and PyUnicode_TranslateCharmap.
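Since this machinery shipped with Python 2.3 and remains in place today, the shape of the exception object the callback receives can be inspected directly in a modern interpreter:

    try:
        u"a\xe4".encode("ascii")
    except UnicodeEncodeError as exc:
        print(exc.encoding)        # ascii
        print(repr(exc.object))    # the original string being encoded
        print(exc.start, exc.end)  # 1 2
        print(exc.reason)          # ordinal not in range(128)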
The Python equivalent of these five callbacks will look like this:

    def strict(exc):
        raise exc

    def ignore(exc):
        if isinstance(exc, UnicodeError):
            return (u"", exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def replace(exc):
        if isinstance(exc, UnicodeEncodeError):
            return ((exc.end-exc.start)*u"?", exc.end)
        elif isinstance(exc, UnicodeDecodeError):
            return (u"\ufffd", exc.end)
        elif isinstance(exc, UnicodeTranslateError):
            return ((exc.end-exc.start)*u"\ufffd", exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def backslashreplace(exc):
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            s = u""
            for c in exc.object[exc.start:exc.end]:
                if ord(c) <= 0xff:
                    s += u"\\x%02x" % ord(c)
                elif ord(c) <= 0xffff:
                    s += u"\\u%04x" % ord(c)
                else:
                    s += u"\\U%08x" % ord(c)
            return (s, exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    def xmlcharrefreplace(exc):
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            s = u""
            for c in exc.object[exc.start:exc.end]:
                s += u"&#%d;" % ord(c)
            return (s, exc.end)
        else:
            raise TypeError("can't handle %s" % exc.__class__.__name__)

These five callback handlers will also be accessible to Python as codecs.strict_error, codecs.ignore_error, codecs.replace_error, codecs.backslashreplace_error and codecs.xmlcharrefreplace_error.

Rationale

Most legacy encodings do not support the full range of Unicode characters. For these cases many high level protocols support a way of escaping a Unicode character (e.g. Python itself supports the \x, \u and \U convention, XML supports character references via &#xxx; etc.).

When implementing such an encoding algorithm, a problem with the current implementation of the encode method of Unicode objects becomes apparent: to determine which characters are unencodable by a certain encoding, every single character has to be tried, because encode does not provide any information about the location of the error(s), so

    # (1)
    us = u"xxx"
    s = us.encode(encoding)

has to be replaced by

    # (2)
    us = u"xxx"
    v = []
    for c in us:
        try:
            v.append(c.encode(encoding))
        except UnicodeError:
            v.append("&#%d;" % ord(c))
    s = "".join(v)

This slows down encoding dramatically, as the loop through the string is now done in Python code and no longer in C code.

Furthermore, this solution poses problems with stateful encodings. For example, UTF-16 uses a Byte Order Mark at the start of the encoded byte string to specify the byte order. Using (2) with UTF-16 results in an 8 bit string with a BOM between every character. To work around this problem, a stream writer - which keeps state between calls to the encoding function - has to be used:

    # (3)
    us = u"xxx"
    import codecs, cStringIO as StringIO
    writer = codecs.getwriter(encoding)

    v = StringIO.StringIO()
    uv = writer(v)
    for c in us:
        try:
            uv.write(c)
        except UnicodeError:
            uv.write(u"&#%d;" % ord(c))
    s = v.getvalue()

To compare the speed of (1) and (3) the following test script has been used:

    # (4)
    import time
    us = u"äa"*1000000
    encoding = "ascii"
    import codecs, cStringIO as StringIO

    t1 = time.time()
    s1 = us.encode(encoding, "replace")
    t2 = time.time()

    writer = codecs.getwriter(encoding)
    v = StringIO.StringIO()
    uv = writer(v)
    for c in us:
        try:
            uv.write(c)
        except UnicodeError:
            uv.write(u"?")
    s2 = v.getvalue()
    t3 = time.time()

    assert(s1 == s2)
    print "1:", t2-t1
    print "2:", t3-t2
    print "factor:", (t3-t2)/(t2-t1)

On Linux this gives the following output (with Python 2.3a0):

    1: 0.274321913719
    2: 51.1284689903
    factor: 186.381278466

i.e. (3) is 180 times slower than (1).
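Because the proposed registry was accepted, the protocol described above can be exercised with today's codecs module. A minimal demonstration, where the handler name "xmlcharref-demo" is arbitrary:

    import codecs

    def xmlcharref_demo(exc):
        # Only encoding errors are handled; the return value is the
        # (replacement, resume position) tuple specified above.
        if not isinstance(exc, UnicodeEncodeError):
            raise TypeError("can't handle %s" % exc.__class__.__name__)
        s = "".join("&#%d;" % ord(c) for c in exc.object[exc.start:exc.end])
        return (s, exc.end)

    codecs.register_error("xmlcharref-demo", xmlcharref_demo)

    print("äöü".encode("ascii", "xmlcharref-demo"))
    # b'&#228;&#246;&#252;'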
Callbacks must be stateless, because as soon as a callback is registered it is available globally and can be called by multiple encode() calls. To be able to use stateful callbacks, the errors parameter for encode/decode/translate would have to be changed from char * to PyObject *, so that the callback could be used directly, without the need to register the callback globally. As this requires changes to lots of C prototypes, this approach was rejected.

Currently all encoding/decoding functions have arguments

    const Py_UNICODE *p, int size

or

    const char *p, int size

to specify the unicode characters/8bit characters to be encoded/decoded. So in case of an error the codec has to create a new unicode or str object from these parameters and store it in the exception object. The callers of these encoding/decoding functions extract these parameters from str/unicode objects themselves most of the time, so it could speed up error handling if these objects were passed directly. As this again requires changes to many C functions, this approach has been rejected.

For stream readers/writers the errors attribute must be changeable, to be able to switch between different error handling methods during the lifetime of the stream reader/writer. This is currently the case for codecs.StreamReader and codecs.StreamWriter and all their subclasses. All core codecs and probably most of the third party codecs (e.g. JapaneseCodecs) derive their stream readers/writers from these classes, so this already works, but the attribute errors should be documented as a requirement.

Implementation Notes

A sample implementation is available as SourceForge patch #432401[2], including a script for testing the speed of various string/encoding/error combinations and a test script.

Currently the new exception classes are old style Python classes. This means that accessing attributes results in a dict lookup. The C API is implemented in a way that makes it possible to switch to new style classes behind the scenes, if Exception (and UnicodeError) will be changed to new style classes implemented in C for improved performance.

The class codecs.StreamReaderWriter uses the errors parameter for both reading and writing. To be more flexible this should probably be changed to two separate parameters for reading and writing.

The errors parameter of PyUnicode_TranslateCharmap is not available to Python, which makes testing of the new functionality of PyUnicode_TranslateCharmap impossible with Python scripts. The patch should add an optional argument errors to unicode.translate to expose the functionality and make testing possible.

Codecs that do something different than encoding/decoding from/to unicode and want to use the new machinery can define their own exception classes, and the strict handlers will automatically work with them. The other predefined error handlers are unicode specific and expect to get a Unicode(Encode|Decode|Translate)Error exception object, so they won't work.

Backwards Compatibility

The semantics of unicode.encode with errors="replace" has changed: the old version always stored a ? character in the output string even if no character was mapped to ? in the mapping. With the proposed patch, the replacement string from the callback will again be looked up in the mapping dictionary. But as all supported encodings are ASCII based, and thus map ? to ?, this should not be a problem in practice.

Illegal values for the errors argument raised ValueError before; now they will raise LookupError.
References

[1] SF feature request #403100, "Multicharacter replacements in PyUnicode_TranslateCharmap", https://bugs.python.org/issue403100

[2] SF patch #432401, "unicode encoding error callbacks", https://bugs.python.org/issue432401

Copyright

This document has been placed in the public domain.
PEP: 255
Title: Simple Generators
Author: Neil Schemenauer <[email protected]>, Tim Peters <[email protected]>, Magnus Lie Hetland <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Requires: 234
Created: 18-May-2001
Python-Version: 2.2
Post-History: 14-Jun-2001, 23-Jun-2001

Abstract

This PEP introduces the concept of generators to Python, as well as a new statement used in conjunction with them, the yield statement.

Motivation

When a producer function has a hard enough job that it requires maintaining state between values produced, most programming languages offer no pleasant and efficient solution beyond adding a callback function to the producer's argument list, to be called with each value produced.

For example, tokenize.py in the standard library takes this approach: the caller must pass a tokeneater function to tokenize(), called whenever tokenize() finds the next token. This allows tokenize to be coded in a natural way, but programs calling tokenize are typically convoluted by the need to remember between callbacks which token(s) were seen last. The tokeneater function in tabnanny.py is a good example of that, maintaining a state machine in global variables, to remember across callbacks what it has already seen and what it hopes to see next. This was difficult to get working correctly, and is still difficult for people to understand. Unfortunately, that's typical of this approach.

An alternative would have been for tokenize to produce an entire parse of the Python program at once, in a large list. Then tokenize clients could be written in a natural way, using local variables and local control flow (such as loops and nested if statements) to keep track of their state. But this isn't practical: programs can be very large, so no a priori bound can be placed on the memory needed to materialize the whole parse; and some tokenize clients only want to see whether something specific appears early in the program (e.g., a future statement, or, as is done in IDLE, just the first indented statement), and then parsing the whole program first is a severe waste of time.

Another alternative would be to make tokenize an iterator <234>, delivering the next token whenever its .next() method is invoked. This is pleasant for the caller in the same way a large list of results would be, but without the memory and "what if I want to get out early?" drawbacks. However, this shifts the burden on tokenize to remember its state between .next() invocations, and the reader need only glance at tokenize.tokenize_loop() to realize what a horrid chore that would be. Or picture a recursive algorithm for producing the nodes of a general tree structure: to cast that into an iterator framework requires removing the recursion manually and maintaining the state of the traversal by hand.

A fourth option is to run the producer and consumer in separate threads. This allows both to maintain their states in natural ways, and so is pleasant for both. Indeed, Demo/threads/Generator.py in the Python source distribution provides a usable synchronized-communication class for doing that in a general way. This doesn't work on platforms without threads, though, and is very slow on platforms that do (compared to what is achievable without threads).

A final option is to use the Stackless[1] (PEP 219) variant implementation of Python instead, which supports lightweight coroutines. This has much the same programmatic benefits as the thread option, but is much more efficient.
However, Stackless is a controversial rethinking of the Python core, and it may not be possible for Jython to implement the same semantics. This PEP isn't the place to debate that, so suffice it to say here that generators provide a useful subset of Stackless functionality in a way that fits easily into the current CPython implementation, and is believed to be relatively straightforward for other Python implementations.

That exhausts the current alternatives. Some other high-level languages provide pleasant solutions, notably iterators in Sather[2], which were inspired by iterators in CLU; and generators in Icon[3], a novel language where every expression is a generator. There are differences among these, but the basic idea is the same: provide a kind of function that can return an intermediate result ("the next value") to its caller, but maintaining the function's local state so that the function can be resumed again right where it left off.

A very simple example:

    def fib():
        a, b = 0, 1
        while 1:
            yield b
            a, b = b, a+b

When fib() is first invoked, it sets a to 0 and b to 1, then yields b back to its caller. The caller sees 1. When fib is resumed, from its point of view the yield statement is really the same as, say, a print statement: fib continues after the yield with all local state intact. a and b then become 1 and 1, and fib loops back to the yield, yielding 1 to its invoker. And so on. From fib's point of view it's just delivering a sequence of results, as if via callback. But from its caller's point of view, the fib invocation is an iterable object that can be resumed at will. As in the thread approach, this allows both sides to be coded in the most natural ways; but unlike the thread approach, this can be done efficiently and on all platforms. Indeed, resuming a generator should be no more expensive than a function call.

The same kind of approach applies to many producer/consumer functions. For example, tokenize.py could yield the next token instead of invoking a callback function with it as argument, and tokenize clients could iterate over the tokens in a natural way: a Python generator is a kind of Python iterator <234>, but of an especially powerful kind.

Specification: Yield

A new statement is introduced:

    yield_stmt: "yield" expression_list

yield is a new keyword, so a future statement (PEP 236) is needed to phase this in: in the initial release, a module desiring to use generators must include the line

    from __future__ import generators

near the top (see PEP 236 for details). Modules using the identifier yield without a future statement will trigger warnings. In the following release, yield will be a language keyword and the future statement will no longer be needed.

The yield statement may only be used inside functions. A function that contains a yield statement is called a generator function. A generator function is an ordinary function object in all respects, but has the new CO_GENERATOR flag set in the code object's co_flags member.

When a generator function is called, the actual arguments are bound to function-local formal argument names in the usual way, but no code in the body of the function is executed. Instead a generator-iterator object is returned; this conforms to the iterator protocol <234>, so in particular can be used in for-loops in a natural way. Note that when the intent is clear from context, the unqualified name "generator" may be used to refer either to a generator-function or a generator-iterator.
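For instance, given the fib generator defined above (and, in the initial release, the generators future import at the top of the module), the caller can drive it explicitly; the .next() method used here is specified in detail below:

    >>> g = fib()   # returns a generator-iterator; no body code runs yet
    >>> g.next()
    1
    >>> g.next()
    1
    >>> g.next()
    2
    >>> [g.next() for i in range(4)]
    [3, 5, 8, 13]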
Each time the .next() method of a generator-iterator is invoked, the code in the body of the generator-function is executed until a yield or return statement (see below) is encountered, or until the end of the body is reached.

If a yield statement is encountered, the state of the function is frozen, and the value of expression_list is returned to .next()'s caller. By "frozen" we mean that all local state is retained, including the current bindings of local variables, the instruction pointer, and the internal evaluation stack: enough information is saved so that the next time .next() is invoked, the function can proceed exactly as if the yield statement were just another external call.

Restriction: A yield statement is not allowed in the try clause of a try/finally construct. The difficulty is that there's no guarantee the generator will ever be resumed, hence no guarantee that the finally block will ever get executed; that's too much a violation of finally's purpose to bear.

Restriction: A generator cannot be resumed while it is actively running:

    >>> def g():
    ...     i = me.next()
    ...     yield i
    >>> me = g()
    >>> me.next()
    Traceback (most recent call last):
      ...
      File "<string>", line 2, in g
    ValueError: generator already executing

Specification: Return

A generator function can also contain return statements of the form:

    return

Note that an expression_list is not allowed on return statements in the body of a generator (although, of course, they may appear in the bodies of non-generator functions nested within the generator).

When a return statement is encountered, control proceeds as in any function return, executing the appropriate finally clauses (if any exist). Then a StopIteration exception is raised, signalling that the iterator is exhausted. A StopIteration exception is also raised if control flows off the end of the generator without an explicit return.

Note that return means "I'm done, and have nothing interesting to return", for both generator functions and non-generator functions.

Note that return isn't always equivalent to raising StopIteration: the difference lies in how enclosing try/except constructs are treated. For example:

    >>> def f1():
    ...     try:
    ...         return
    ...     except:
    ...         yield 1
    >>> print list(f1())
    []

because, as in any function, return simply exits, but:

    >>> def f2():
    ...     try:
    ...         raise StopIteration
    ...     except:
    ...         yield 42
    >>> print list(f2())
    [42]

because StopIteration is captured by a bare except, as is any exception.

Specification: Generators and Exception Propagation

If an unhandled exception -- including, but not limited to, StopIteration -- is raised by, or passes through, a generator function, then the exception is passed on to the caller in the usual way, and subsequent attempts to resume the generator function raise StopIteration. In other words, an unhandled exception terminates a generator's useful life.

Example (not idiomatic, but to illustrate the point):

    >>> def f():
    ...     return 1/0
    >>> def g():
    ...     yield f()  # the zero division exception propagates
    ...     yield 42   # and we'll never get here
    >>> k = g()
    >>> k.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
      File "<stdin>", line 2, in g
      File "<stdin>", line 2, in f
    ZeroDivisionError: integer division or modulo by zero
    >>> k.next()  # and the generator cannot be resumed
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    StopIteration
    >>>

Specification: Try/Except/Finally

As noted earlier, yield is not allowed in the try clause of a try/finally construct.
A consequence is that generators should allocate critical resources with great care. There is no restriction on yield otherwise appearing in finally clauses, except clauses, or in the try clause of a try/except construct:

    >>> def f():
    ...     try:
    ...         yield 1
    ...         try:
    ...             yield 2
    ...             1/0
    ...             yield 3  # never get here
    ...         except ZeroDivisionError:
    ...             yield 4
    ...             yield 5
    ...             raise
    ...         except:
    ...             yield 6
    ...         yield 7  # the "raise" above stops this
    ...     except:
    ...         yield 8
    ...     yield 9
    ...     try:
    ...         x = 12
    ...     finally:
    ...         yield 10
    ...     yield 11
    >>> print list(f())
    [1, 2, 4, 5, 8, 9, 10, 11]
    >>>

Example

    # A binary tree class.
    class Tree:

        def __init__(self, label, left=None, right=None):
            self.label = label
            self.left = left
            self.right = right

        def __repr__(self, level=0, indent="    "):
            s = level*indent + `self.label`
            if self.left:
                s = s + "\n" + self.left.__repr__(level+1, indent)
            if self.right:
                s = s + "\n" + self.right.__repr__(level+1, indent)
            return s

        def __iter__(self):
            return inorder(self)

    # Create a Tree from a list.
    def tree(list):
        n = len(list)
        if n == 0:
            return []
        i = n / 2
        return Tree(list[i], tree(list[:i]), tree(list[i+1:]))

    # A recursive generator that generates Tree labels in in-order.
    def inorder(t):
        if t:
            for x in inorder(t.left):
                yield x
            yield t.label
            for x in inorder(t.right):
                yield x

    # Show it off: create a tree.
    t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    # Print the nodes of the tree in in-order.
    for x in t:
        print x,
    print

    # A non-recursive generator.
    def inorder(node):
        stack = []
        while node:
            while node.left:
                stack.append(node)
                node = node.left
            yield node.label
            while not node.right:
                try:
                    node = stack.pop()
                except IndexError:
                    return
                yield node.label
            node = node.right

    # Exercise the non-recursive generator.
    for x in t:
        print x,
    print

Both output blocks display:

    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z

Q & A

Why not a new keyword instead of reusing def?

See BDFL Pronouncements section below.

Why a new keyword for yield? Why not a builtin function instead?

Control flow is much better expressed via keyword in Python, and yield is a control construct. It's also believed that efficient implementation in Jython requires that the compiler be able to determine potential suspension points at compile-time, and a new keyword makes that easy. The CPython reference implementation also exploits it heavily, to detect which functions are generator-functions (although a new keyword in place of def would solve that for CPython -- but people asking the "why a new keyword?" question don't want any new keyword).

Then why not some other special syntax without a new keyword? For example, one of these instead of yield 3:

    return 3 and continue
    return and continue 3
    return generating 3
    continue return 3
    return >> , 3
    from generator return 3
    return >> 3
    return << 3
    >> 3
    << 3
    * 3

Did I miss one <wink>? Out of hundreds of messages, I counted three suggesting such an alternative, and extracted the above from them. It would be nice not to need a new keyword, but nicer to make yield very clear -- I don't want to have to deduce that a yield is occurring from making sense of a previously senseless sequence of keywords or operators. Still, if this attracts enough interest, proponents should settle on a single consensus suggestion, and Guido will Pronounce on it.

Why allow return at all? Why not force termination to be spelled raise StopIteration?
The mechanics of StopIteration are low-level details, much like the mechanics of IndexError in Python 2.1: the implementation needs to do something well-defined under the covers, and Python exposes these mechanisms for advanced users. That's not an argument for forcing everyone to work at that level, though. return means "I'm done" in any kind of function, and that's easy to explain and to use. Note that return isn't always equivalent to raise StopIteration in a try/except construct, either (see the "Specification: Return" section).

Then why not allow an expression on return too?

Perhaps we will someday. In Icon, return expr means both "I'm done", and "but I have one final useful value to return too, and this is it". At the start, and in the absence of compelling uses for return expr, it's simply cleaner to use yield exclusively for delivering values.

BDFL Pronouncements

Issue: Introduce another new keyword (say, gen or generator) in place of def, or otherwise alter the syntax, to distinguish generator-functions from non-generator functions.

Con: In practice (how you think about them), generators are functions, but with the twist that they're resumable. The mechanics of how they're set up is a comparatively minor technical issue, and introducing a new keyword would unhelpfully overemphasize the mechanics of how generators get started (a vital but tiny part of a generator's life).

Pro: In reality (how you think about them), generator-functions are actually factory functions that produce generator-iterators as if by magic. In this respect they're radically different from non-generator functions, acting more like a constructor than a function, so reusing def is at best confusing. A yield statement buried in the body is not enough warning that the semantics are so different.

BDFL: def it stays. No argument on either side is totally convincing, so I have consulted my language designer's intuition. It tells me that the syntax proposed in the PEP is exactly right - not too hot, not too cold. But, like the Oracle at Delphi in Greek mythology, it doesn't tell me why, so I don't have a rebuttal for the arguments against the PEP syntax. The best I can come up with (apart from agreeing with the rebuttals already made) is "FUD". If this had been part of the language from day one, I very much doubt it would have made Andrew Kuchling's "Python Warts" page.

Reference Implementation

The current implementation, in a preliminary state (no docs, but well tested and solid), is part of Python's CVS development tree[4]. Using this requires that you build Python from source. This was derived from an earlier patch by Neil Schemenauer[5].

Footnotes and References

[1] http://www.stackless.com/

[2] "Iteration Abstraction in Sather", Murer, Omohundro, Stoutamire and Szyperski, http://www.icsi.berkeley.edu/~sather/Publications/toplas.html

[3] http://www.cs.arizona.edu/icon/

[4] To experiment with this implementation, check out Python from CVS according to the instructions at http://sf.net/cvs/?group_id=5470 Note that the std test Lib/test/test_generators.py contains many examples, including all those in this PEP.

[5] http://python.ca/nas/python/generator.diff

Copyright

This document has been placed in the public domain.
PEP: 433
Title: Easier suppression of file descriptor inheritance
Version: $Revision$
Last-Modified: $Date$
Author: Victor Stinner <[email protected]>
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 10-Jan-2013
Python-Version: 3.4
Superseded-By: 446

Abstract

Add a new optional cloexec parameter on functions creating file descriptors, add different ways to change default values of this parameter, and add four new functions:

- os.get_cloexec(fd)
- os.set_cloexec(fd, cloexec=True)
- sys.getdefaultcloexec()
- sys.setdefaultcloexec(cloexec)

Rationale

A file descriptor has a close-on-exec flag which indicates if the file descriptor will be inherited or not.

On UNIX, if the close-on-exec flag is set, the file descriptor is not inherited: it will be closed at the execution of child processes; otherwise the file descriptor is inherited by child processes.

On Windows, if the close-on-exec flag is set, the file descriptor is not inherited; the file descriptor is inherited by child processes if the close-on-exec flag is cleared and if CreateProcess() is called with the bInheritHandles parameter set to TRUE (when subprocess.Popen is created with close_fds=False, for example). Windows does not have a "close-on-exec" flag but an inheritance flag, which is just the opposite value. For example, setting the close-on-exec flag means clearing the HANDLE_FLAG_INHERIT flag of a handle.

Status in Python 3.3

On UNIX, the subprocess module closes file descriptors greater than 2 by default since Python 3.2[1]. All file descriptors created by the parent process are automatically closed in the child process.

xmlrpc.server.SimpleXMLRPCServer sets the close-on-exec flag of the listening socket, whereas the parent class socketserver.TCPServer does not set this flag.

There are other cases creating a subprocess or executing a new program where file descriptors are not closed: functions of the os.spawn*() and the os.exec*() families, and third party modules calling exec() or fork() + exec(). In this case, file descriptors are shared between the parent and the child processes, which is usually unexpected and causes various issues.

This PEP proposes to continue the work started with the change in the subprocess module in Python 3.2, to fix the issue in any code, and not just code using subprocess.

Inherited file descriptors issues

Closing the file descriptor in the parent process does not close the related resource (file, socket, ...) because it is still open in the child process.

The listening socket of TCPServer is not closed on exec(): the child process is able to get connections from new clients; if the parent closes the listening socket and creates a new listening socket on the same address, it would get an "address already in use" error.

Not closing file descriptors can lead to resource exhaustion: even if the parent closes all files, creating a new file descriptor may fail with "too many files" because files are still open in the child process.

See also the following issues:

- Issue #2320: Race condition in subprocess using stdin (2008)
- Issue #3006: subprocess.Popen causes socket to remain open after close (2008)
- Issue #7213: subprocess leaks open file descriptors between Popen instances causing hangs (2009)
- Issue #12786: subprocess wait() hangs when stdin is closed (2011)

Security

Leaking file descriptors is a major security vulnerability. An untrusted child process can read sensitive data like passwords and take control of the parent process through leaked file descriptors.
It is for example a known vulnerability to escape from a chroot. See also the CERT recommendation: FIO42-C. Ensure files are properly closed when they are no longer needed.

Examples of vulnerabilities:

- OpenSSH Security Advisory: portable-keysign-rand-helper.adv (April 2011)
- CWE-403: Exposure of File Descriptor to Unintended Control Sphere (2008)
- Hijacking Apache https by mod_php (Dec 2003)
- Apache: Apr should set FD_CLOEXEC if APR_FOPEN_NOCLEANUP is not set (fixed in 2009)
- PHP: system() (and similar) don't cleanup opened handles of Apache (not fixed in January 2013)

Atomicity

Using fcntl() to set the close-on-exec flag is not safe in a multithreaded application. If a thread calls fork() and exec() between the creation of the file descriptor and the call to fcntl(fd, F_SETFD, new_flags), the file descriptor will be inherited by the child process. Modern operating systems offer functions to set the flag during the creation of the file descriptor, which avoids the race condition.

Portability

Python 3.2 added the socket.SOCK_CLOEXEC flag, and Python 3.3 added the os.O_CLOEXEC flag and the os.pipe2() function. It is already possible to set the close-on-exec flag atomically in Python 3.3 when opening a file and creating a pipe or socket.

The problem is that these flags and functions are not portable: only recent versions of operating systems support them. The O_CLOEXEC and SOCK_CLOEXEC flags are ignored by old Linux versions, and so the FD_CLOEXEC flag must be checked using fcntl(fd, F_GETFD). If the kernel ignores the O_CLOEXEC or SOCK_CLOEXEC flag, a call to fcntl(fd, F_SETFD, flags) is required to set the close-on-exec flag.

Note: OpenBSD older than 5.2 does not close the file descriptor with the close-on-exec flag set if fork() is used before exec(), but it works correctly if exec() is called without fork(). Try openbsd_bug.py.

Scope

Applications still have to close file descriptors explicitly after a fork(). The close-on-exec flag only closes file descriptors after exec(), and so after fork() + exec().

This PEP only changes the close-on-exec flag of file descriptors created by the Python standard library, or by modules using the standard library. Third party modules not using the standard library should be modified to conform to this PEP. The new os.set_cloexec() function can be used, for example.

Note: See Close file descriptors after fork for a possible solution for fork() without exec().

Proposal

Add a new optional cloexec parameter on functions creating file descriptors and different ways to change the default value of this parameter.

Add new functions:

- os.get_cloexec(fd:int) -> bool: get the close-on-exec flag of a file descriptor. Not available on all platforms.
- os.set_cloexec(fd:int, cloexec:bool=True): set or clear the close-on-exec flag on a file descriptor. Not available on all platforms.
- sys.getdefaultcloexec() -> bool: get the current default value of the cloexec parameter
- sys.setdefaultcloexec(cloexec: bool): set the default value of the cloexec parameter

Add a new optional cloexec parameter to:

- asyncore.dispatcher.create_socket()
- io.FileIO
- io.open()
- open()
- os.dup()
- os.dup2()
- os.fdopen()
- os.open()
- os.openpty()
- os.pipe()
- select.devpoll()
- select.epoll()
- select.kqueue()
- socket.socket()
- socket.socket.accept()
- socket.socket.dup()
- socket.socket.fromfd()
- socket.socketpair()

The default value of the cloexec parameter is sys.getdefaultcloexec().

Add a new command line option -e and an environment variable PYTHONCLOEXEC to set the close-on-exec flag by default.
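To make the proposed API concrete, here is a minimal usage sketch. Note that this PEP was superseded by PEP 446, so the names below were never released; this is only how the proposal would have looked in use:

    import os
    import sys

    # Opt in to safe-by-default behaviour for the whole process:
    sys.setdefaultcloexec(True)

    r, w = os.pipe()   # both ends now get the close-on-exec flag
    assert os.get_cloexec(r) and os.get_cloexec(w)

    # A descriptor that must survive exec() opts out explicitly
    # (file name hypothetical):
    fd = os.open("data.bin", os.O_RDONLY, cloexec=False)
    assert not os.get_cloexec(fd)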
subprocess clears the close-on-exec flag of the file descriptors of the pass_fds parameter.

All functions creating file descriptors in the standard library must respect the default value of the cloexec parameter: sys.getdefaultcloexec().

File descriptors 0 (stdin), 1 (stdout) and 2 (stderr) are expected to be inherited, but Python does not handle them differently. When os.dup2() is used to replace standard streams, cloexec=False must be specified explicitly.

Drawbacks of the proposal:

- It is no longer possible to know if the close-on-exec flag will be set or not on a newly created file descriptor just by reading the source code.
- If the inheritance of a file descriptor matters, the cloexec parameter must now be specified explicitly, or the library or the application will not work depending on the default value of the cloexec parameter.

Alternatives

Inheritance enabled by default, default not configurable

Add a new optional parameter cloexec on functions creating file descriptors. The default value of the cloexec parameter is False, and this default cannot be changed. File descriptor inheritance enabled by default is also the default on POSIX and on Windows. This alternative is the most conservative option.

This option does not solve the issues listed in the Rationale section, it only provides a helper to fix them. All functions creating file descriptors have to be modified to set cloexec=True in each module used by an application to fix all these issues.

Inheritance enabled by default, default can only be set to True

This alternative is based on the proposal: the only difference is that sys.setdefaultcloexec() does not take any argument, it can only be used to set the default value of the cloexec parameter to True.

Disable inheritance by default

This alternative is based on the proposal: the only difference is that the default value of the cloexec parameter is True (instead of False). If a file must be inherited by child processes, the cloexec=False parameter can be used.

Advantages of setting the close-on-exec flag by default:

- There are far more programs that are bitten by FD inheritance upon exec (see Inherited file descriptors issues and Security) than programs relying on it (see Applications using inheritance of file descriptors).

Drawbacks of setting the close-on-exec flag by default:

- It violates the principle of least surprise. Developers using the os module may expect that Python respects the POSIX standard, and so that the close-on-exec flag is not set by default.
- The os module is written as a thin wrapper to system calls (to functions of the C standard library). If atomic flags to set the close-on-exec flag are not supported (see Appendix: Operating system support), a single Python function call may call 2 or 3 system calls (see the Performances section).
- Extra system calls, if any, may slow down Python: see Performances.

Backward compatibility: only a few programs rely on inheritance of file descriptors, and they only pass a few file descriptors, usually just one. These programs will fail immediately with an EBADF error, and it will be simple to fix them: add the cloexec=False parameter or use os.set_cloexec(fd, False). The subprocess module will be changed anyway to clear the close-on-exec flag on file descriptors listed in the pass_fds parameter of the Popen constructor. So it is possible that these programs will not need any fix if they use the subprocess module.

Close file descriptors after fork

This PEP does not fix issues with applications using fork() without exec().
Python needs a generic mechanism to register callbacks which would be called after a fork; see #16500: Add an atfork module. Such a registry could be used to close file descriptors just after a fork().

Drawbacks:

- It does not solve the problem on Windows: fork() does not exist on Windows.
- This alternative does not solve the problem for programs using exec() without fork().
- A third party module may call the C function fork() directly, which will not call the "atfork" callbacks.
- All functions creating file descriptors must be changed to register a callback and then unregister their callback when the file is closed. Or a list of all open file descriptors must be maintained.
- The operating system is a better place than Python to close file descriptors automatically. For example, it is not easy to avoid a race condition between closing the file and unregistering the callback closing the file.

open(): add "e" flag to mode

A new "e" mode would set the close-on-exec flag (best-effort).

This alternative only solves the problem for open(). socket.socket() and os.pipe() do not have a mode parameter, for example.

Since version 2.7, the GNU libc supports the "e" flag for fopen(). It uses O_CLOEXEC if available, or uses fcntl(fd, F_SETFD, FD_CLOEXEC). With Visual Studio, fopen() accepts an "N" flag which uses O_NOINHERIT.

Bikeshedding on the name of the new parameter

- inherit, inherited: closer to the Windows definition
- sensitive
- sterile: "Does not produce offspring."

Applications using inheritance of file descriptors

Most developers don't know that file descriptors are inherited by default. Most programs do not rely on inheritance of file descriptors. For example, subprocess.Popen was changed in Python 3.2 to close all file descriptors greater than 2 in the child process by default. No user has complained about this behavior change yet.

Network servers using fork may want to pass the client socket to the child process. For example, on UNIX a CGI server passes the client socket through file descriptors 0 (stdin) and 1 (stdout) using dup2().

To access a restricted resource like creating a socket listening on a TCP port lower than 1024 or reading a file containing sensitive data like passwords, a common practice is: start as the root user, create a file descriptor, create a child process, drop privileges (e.g. change the current user), pass the file descriptor to the child process and exit the parent process. Security is very important in such a use case: leaking another file descriptor would be a critical security vulnerability (see Security). The root process may not exit but monitor the child process instead, and restart a new child process, passing the same file descriptor, if the previous child process crashed.

Examples of programs taking file descriptors from the parent process using a command line option:

- gpg: --status-fd <fd>, --logger-fd <fd>, etc.
- openssl: -pass fd:<fd>
- qemu: -add-fd <fd>
- valgrind: --log-fd=<fd>, --input-fd=<fd>, etc.
- xterm: -S <fd>

On Linux, it is possible to use the "/dev/fd/<fd>" filename to pass a file descriptor to a program expecting a filename.

Performances

Setting the close-on-exec flag may require additional system calls for each creation of new file descriptors. The number of additional system calls depends on the method used to set the flag:

- O_NOINHERIT: no additional system call
- O_CLOEXEC: one additional system call, but only at the creation of the first file descriptor, to check if the flag is supported. If the flag is not supported, Python has to fall back to the next method.
- ioctl(fd, FIOCLEX): one additional system call per file descriptor
- fcntl(fd, F_SETFD, flags): two additional system calls per file descriptor, one to get the old flags and one to set the new flags

On Linux, setting the close-on-exec flag has a low overhead on performances. Results of bench_cloexec.py on Linux 3.6:

- close-on-exec flag not set: 7.8 us
- O_CLOEXEC: 1% slower (7.9 us)
- ioctl(): 3% slower (8.0 us)
- fcntl(): 3% slower (8.0 us)

Implementation

os.get_cloexec(fd)

Get the close-on-exec flag of a file descriptor. Pseudo-code (with the obvious typos of the draft fixed):

    if os.name == 'nt':
        def get_cloexec(fd):
            handle = _winapi._get_osfhandle(fd)
            flags = _winapi.GetHandleInformation(handle)
            return not (flags & _winapi.HANDLE_FLAG_INHERIT)
    else:
        try:
            import fcntl
        except ImportError:
            pass
        else:
            def get_cloexec(fd):
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                return bool(flags & fcntl.FD_CLOEXEC)

os.set_cloexec(fd, cloexec=True)

Set or clear the close-on-exec flag on a file descriptor. The flag is set after the creation of the file descriptor, and so it is not atomic. Pseudo-code (a hypothetical ioctl module is assumed to expose the FIOCLEX/FIONCLEX constants):

    if os.name == 'nt':
        def set_cloexec(fd, cloexec=True):
            handle = _winapi._get_osfhandle(fd)
            mask = _winapi.HANDLE_FLAG_INHERIT
            if cloexec:
                flags = 0
            else:
                flags = mask
            _winapi.SetHandleInformation(handle, mask, flags)
    else:
        fcntl = None
        ioctl = None
        try:
            import ioctl
        except ImportError:
            try:
                import fcntl
            except ImportError:
                pass
        if ioctl is not None and hasattr(ioctl, 'FIOCLEX'):
            def set_cloexec(fd, cloexec=True):
                if cloexec:
                    ioctl.ioctl(fd, ioctl.FIOCLEX)
                else:
                    ioctl.ioctl(fd, ioctl.FIONCLEX)
        elif fcntl is not None:
            def set_cloexec(fd, cloexec=True):
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                if cloexec:
                    flags |= fcntl.FD_CLOEXEC
                else:
                    flags &= ~fcntl.FD_CLOEXEC
                fcntl.fcntl(fd, fcntl.F_SETFD, flags)

ioctl is preferred over fcntl because it requires only one syscall, instead of two syscalls for fcntl.

Note: fcntl(fd, F_SETFD, flags) only supports one flag (FD_CLOEXEC), so it would be possible to avoid fcntl(fd, F_GETFD). But it may drop other flags in the future, and so it is safer to keep the two function calls.

Note: fopen() of the GNU libc ignores the error if fcntl(fd, F_SETFD, flags) failed.

open()

- Windows: open() with the O_NOINHERIT flag [atomic]
- open() with the O_CLOEXEC flag [atomic]
- open() + os.set_cloexec(fd, True) [best-effort]

os.dup()

- Windows: DuplicateHandle() [atomic]
- fcntl(fd, F_DUPFD_CLOEXEC) [atomic]
- dup() + os.set_cloexec(fd, True) [best-effort]

os.dup2()

- fcntl(fd, F_DUP2FD_CLOEXEC, fd2) [atomic]
- dup3() with the O_CLOEXEC flag [atomic]
- dup2() + os.set_cloexec(fd, True) [best-effort]

os.pipe()

- Windows: CreatePipe() with SECURITY_ATTRIBUTES.bInheritHandle=FALSE, or _pipe() with the O_NOINHERIT flag [atomic]
- pipe2() with the O_CLOEXEC flag [atomic]
- pipe() + os.set_cloexec(fd, True) [best-effort]

socket.socket()

- Windows: WSASocket() with the WSA_FLAG_NO_HANDLE_INHERIT flag [atomic]
- socket() with the SOCK_CLOEXEC flag [atomic]
- socket() + os.set_cloexec(fd, True) [best-effort]

socket.socketpair()

- socketpair() with the SOCK_CLOEXEC flag [atomic]
- socketpair() + os.set_cloexec(fd, True) [best-effort]

socket.socket.accept()

- accept4() with the SOCK_CLOEXEC flag [atomic]
- accept() + os.set_cloexec(fd, True) [best-effort]

Backward compatibility

There is no backward incompatible change. The default behaviour is unchanged: the close-on-exec flag is not set by default.

Appendix: Operating system support

Windows

Windows has an O_NOINHERIT flag: "Do not inherit in child processes". For example, it is supported by open() and _pipe().
The flag can be cleared using SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0).

CreateProcess() has a bInheritHandles parameter: if it is FALSE, the handles are not inherited. If it is TRUE, handles with the HANDLE_FLAG_INHERIT flag set are inherited. subprocess.Popen uses the close_fds option to define bInheritHandles.

ioctl

Functions:

- ioctl(fd, FIOCLEX, 0): set the close-on-exec flag
- ioctl(fd, FIONCLEX, 0): clear the close-on-exec flag

Availability: Linux, Mac OS X, QNX, NetBSD, OpenBSD, FreeBSD.

fcntl

Functions:

- flags = fcntl(fd, F_GETFD); fcntl(fd, F_SETFD, flags | FD_CLOEXEC): set the close-on-exec flag
- flags = fcntl(fd, F_GETFD); fcntl(fd, F_SETFD, flags & ~FD_CLOEXEC): clear the close-on-exec flag

Availability: AIX, Digital UNIX, FreeBSD, HP-UX, IRIX, Linux, Mac OS X, OpenBSD, Solaris, SunOS, Unicos.

Atomic flags

New flags:

- O_CLOEXEC: available on Linux (2.6.23), FreeBSD (8.3), OpenBSD 5.0, Solaris 11, QNX, BeOS, next NetBSD release (6.1?). This flag is part of POSIX.1-2008.
- SOCK_CLOEXEC flag for socket() and socketpair(), available on Linux 2.6.27, OpenBSD 5.2, NetBSD 6.0.
- WSA_FLAG_NO_HANDLE_INHERIT flag for WSASocket(): supported on Windows 7 with SP1, Windows Server 2008 R2 with SP1, and later
- fcntl(): F_DUPFD_CLOEXEC flag, available on Linux 2.6.24, OpenBSD 5.0, FreeBSD 9.1, NetBSD 6.0, Solaris 11. This flag is part of POSIX.1-2008.
- fcntl(): F_DUP2FD_CLOEXEC flag, available on FreeBSD 9.1 and Solaris 11.
- recvmsg(): MSG_CMSG_CLOEXEC, available on Linux 2.6.23, NetBSD 6.0.

On Linux older than 2.6.23, the O_CLOEXEC flag is simply ignored. So we have to check that the flag is supported by calling fcntl(). If it does not work, we have to set the flag using ioctl() or fcntl().

On Linux older than 2.6.27, if the SOCK_CLOEXEC flag is set in the socket type, socket() or socketpair() fail and errno is set to EINVAL.

On Windows XP SP3, WSASocket() fails with WSAEPROTOTYPE when the WSA_FLAG_NO_HANDLE_INHERIT flag is used.

New functions:

- dup3(): available on Linux 2.6.27 (and glibc 2.9)
- pipe2(): available on Linux 2.6.27 (and glibc 2.9)
- accept4(): available on Linux 2.6.28 (and glibc 2.10)

If accept4() is called on Linux older than 2.6.28, accept4() returns -1 (fail) and errno is set to ENOSYS.

Links

Links:

- Secure File Descriptor Handling (Ulrich Drepper, 2008)
- win32_support.py of the Tornado project: emulate fcntl(fd, F_SETFD, FD_CLOEXEC) using SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 1)
- LKML: [PATCH] nextfd(2)

Python issues:

- #10115: Support accept4() for atomic setting of flags at socket creation
- #12105: open() does not able to set flags, such as O_CLOEXEC
- #12107: TCP listening sockets created without FD_CLOEXEC flag
- #16500: Add an atfork module
- #16850: Add "e" mode to open(): close-and-exec (O_CLOEXEC) / O_NOINHERIT
- #16860: Use O_CLOEXEC in the tempfile module
- #17036: Implementation of the PEP 433
- #16946: subprocess: _close_open_fd_range_safe() does not set close-on-exec flag on Linux < 2.6.23 if O_CLOEXEC is defined, http://bugs.python.org/issue16946
- #17070: PEP 433: Use the new cloexec to improve security and avoid bugs

Other languages:

- Perl sets the close-on-exec flag on newly created file descriptors if their number is greater than $SYSTEM_FD_MAX ($^F). See the $SYSTEM_FD_MAX documentation. Perl has done this since the creation of Perl (it was already present in Perl 1).
- Ruby: Set FD_CLOEXEC for all fds (except 0, 1, 2)
- Ruby: O_CLOEXEC flag missing for Kernel::open: the commit was reverted later
- OCaml: PR#5256: Processes opened using Unix.open_process* inherit all opened file descriptors (including sockets). OCaml has a Unix.set_close_on_exec function.

Footnotes

[1] On UNIX since Python 3.2, subprocess.Popen() closes all file descriptors by default: close_fds=True. It closes file descriptors in the range 3 inclusive to local_max_fd exclusive, where local_max_fd is fcntl(0, F_MAXFD) on NetBSD, or sysconf(_SC_OPEN_MAX) otherwise. If the error pipe has a descriptor smaller than 3, ValueError is raised.

Copyright

This document has been placed in the public domain.

Local Variables:
mode: indented-text
indent-tabs-mode: nil
sentence-end-double-space: t
fill-column: 70
coding: utf-8
End:
PEP: 661
Title: Sentinel Values
Author: Tal Einat <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-661-sentinel-values/9126
Status: Draft
Type: Standards Track
Content-Type: text/x-rst
Created: 06-Jun-2021
Post-History: 20-May-2021, 06-Jun-2021

TL;DR: See the Specification and Reference Implementation.

Abstract

Unique placeholder values, commonly known as "sentinel values", are common in programming. They have many uses, such as for:

- Default values for function arguments, for when a value was not given:

      def foo(value=None):
          ...

- Return values from functions when something is not found or unavailable:

      >>> "abc".find("d")
      -1

- Missing data, such as NULL in relational databases or "N/A" ("not available") in spreadsheets

Python has the special value None, which is intended to be used as such a sentinel value in most cases. However, sometimes an alternative sentinel value is needed, usually when it needs to be distinct from None since None is a valid value in that context. Such cases are common enough that several idioms for implementing such sentinels have arisen over the years, but uncommon enough that there hasn't been a clear need for standardization. However, the common implementations, including some in the stdlib, suffer from several significant drawbacks.

This PEP proposes adding a utility for defining sentinel values, to be used in the stdlib and made publicly available as part of the stdlib.

Note: Changing all existing sentinels in the stdlib to be implemented this way is not deemed necessary, and whether to do so is left to the discretion of the maintainers.

Motivation

In May 2021, a question was brought up on the python-dev mailing list[1] about how to better implement a sentinel value for traceback.print_exception. The existing implementation used the following common idiom:

    _sentinel = object()

However, this object has an uninformative and overly verbose repr, causing the function's signature to be overly long and hard to read:

    >>> help(traceback.print_exception)
    Help on function print_exception in module traceback:

    print_exception(exc, /, value=<object object at 0x000002825DF09650>,
                    tb=<object object at 0x000002825DF09650>, limit=None,
                    file=None, chain=True)

Additionally, two other drawbacks of many existing sentinels were brought up in the discussion:

1. Some do not have a distinct type, hence it is impossible to define clear type signatures for functions with such sentinels as default values.
2. They behave unexpectedly after being copied or unpickled, due to a separate instance being created and thus comparisons using is failing.

In the ensuing discussion, Victor Stinner supplied a list of currently used sentinel values in the Python standard library[2]. This showed that the need for sentinels is fairly common, that there are various implementation methods used even within the stdlib, and that many of these suffer from at least one of the three above drawbacks.

The discussion did not lead to any clear consensus on whether a standard implementation method is needed or desirable, whether the drawbacks mentioned are significant, nor which kind of implementation would be good. The author of this PEP created an issue on bugs.python.org (now a GitHub issue[3]) suggesting options for improvement, but that focused on only a single problematic aspect of a few cases, and failed to gather any support.

A poll[4] was created on discuss.python.org to get a clearer sense of the community's opinions.
After nearly two weeks, significant further discussion, and 39 votes, the poll's results were not conclusive. 40% had voted for "The status-quo is fine / there's no need for consistency in this", but most voters had voted for one or more standardized solutions. Specifically, 37% of the voters chose "Consistent use of a new, dedicated sentinel factory / class / meta-class, also made publicly available in the stdlib".

With such mixed opinions, this PEP was created to facilitate making a decision on the subject.

While working on this PEP, iterating on various options and implementations and continuing discussions, the author has come to the opinion that a simple, good implementation available in the standard library would be worth having, both for use in the standard library itself and elsewhere.

Rationale

The criteria guiding the chosen implementation were:

1. The sentinel objects should behave as expected by a sentinel object: When compared using the is operator, it should always be considered identical to itself but never to any other object.
2. Creating a sentinel object should be a simple, straightforward one-liner.
3. It should be simple to define as many distinct sentinel values as needed.
4. The sentinel objects should have a clear and short repr.
5. It should be possible to use clear type signatures for sentinels.
6. The sentinel objects should behave correctly after copying and/or unpickling.
7. Such sentinels should work when using CPython 3.x and PyPy3, and ideally also with other implementations of Python.
8. As simple and straightforward as possible, in implementation and especially in use. Avoid this becoming one more special thing to learn when learning Python. It should be easy to find and use when needed, and obvious enough when reading code that one would normally not feel a need to look up its documentation.

With so many uses in the Python standard library[5], it would be useful to have an implementation in the standard library, since the stdlib cannot use implementations of sentinel objects available elsewhere (such as the sentinels[6] or sentinel[7] PyPI packages).

After researching existing idioms and implementations, and going through many different possible implementations, an implementation was written which meets all of these criteria (see Reference Implementation).

Specification

A new Sentinel class will be added to a new sentinels module. Its initializer will accept a single required argument, the name of the sentinel object, and three optional arguments: the repr of the object, its boolean value, and the name of its module:

    >>> from sentinels import Sentinel
    >>> NotGiven = Sentinel('NotGiven')
    >>> NotGiven
    <NotGiven>
    >>> MISSING = Sentinel('MISSING', repr='mymodule.MISSING')
    >>> MISSING
    mymodule.MISSING
    >>> MEGA = Sentinel('MEGA',
    ...                 repr='<MEGA>',
    ...                 bool_value=False,
    ...                 module_name='mymodule')
    >>> MEGA
    <MEGA>

Checking if a value is such a sentinel should be done using the is operator, as is recommended for None. Equality checks using == will also work as expected, returning True only when the object is compared with itself. Identity checks such as if value is MISSING: should usually be used rather than boolean checks such as if value: or if not value:. Sentinel instances are truthy by default, unlike None. This parallels the default for arbitrary classes, as well as the boolean value of Ellipsis.

The names of sentinels are unique within each module.
When calling Sentinel() in a module where a sentinel with that name was already defined, the existing sentinel with that name will be returned. Sentinels with the same name in different modules will be distinct from each other.

Creating a copy of a sentinel object, such as by using copy.copy() or by pickling and unpickling, will return the same object.

Type annotations for sentinel values should use Literal[<sentinel_object>]. For example:

    def foo(value: int | Literal[MISSING] = MISSING) -> int:
        ...

The module_name optional argument should normally not need to be supplied, as Sentinel() will usually be able to recognize the module in which it was called. module_name should be supplied only in unusual cases when this automatic recognition does not work as intended, such as perhaps when using Jython or IronPython. This parallels the designs of Enum and namedtuple. For more details, see PEP 435.

The Sentinel class may not be sub-classed, to avoid overly-clever uses based on it, such as attempts to use it as a base for implementing singletons. It is considered important that the addition of Sentinel to the stdlib should add minimal complexity.

Ordering comparisons are undefined for sentinel objects.

Backwards Compatibility

While not breaking existing code, adding a new "sentinels" stdlib module could cause some confusion with regard to existing modules named "sentinels", and specifically with the "sentinels" package on PyPI.

The existing "sentinels" package on PyPI[8] appears to be abandoned, with the latest release being made on Aug. 2016. Therefore, using this name for a new stdlib module seems reasonable. If and when this PEP is accepted, it may be worth verifying if this has indeed been abandoned, and if so asking to transfer ownership to the CPython maintainers to reduce the potential for confusion with the new stdlib module.

How to Teach This

The normal types of documentation of new stdlib modules and features, namely doc-strings, module docs and a section in "What's New", should suffice.

Security Implications

This proposal should have no security implications.

Reference Implementation

The reference implementation is found in a dedicated GitHub repo[9]. A simplified version follows:

    import sys

    _registry = {}

    class Sentinel:
        """Unique sentinel values."""

        def __new__(cls, name, repr=None, bool_value=True, module_name=None):
            name = str(name)
            repr = str(repr) if repr else f'<{name.split(".")[-1]}>'
            bool_value = bool(bool_value)
            if module_name is None:
                try:
                    module_name = \
                        sys._getframe(1).f_globals.get('__name__', '__main__')
                except (AttributeError, ValueError):
                    module_name = __name__

            registry_key = f'{module_name}-{name}'

            sentinel = _registry.get(registry_key, None)
            if sentinel is not None:
                return sentinel

            sentinel = super().__new__(cls)
            sentinel._name = name
            sentinel._repr = repr
            sentinel._bool_value = bool_value
            sentinel._module_name = module_name

            return _registry.setdefault(registry_key, sentinel)

        def __repr__(self):
            return self._repr

        def __bool__(self):
            return self._bool_value

        def __reduce__(self):
            # Pass bool_value as well, so that module_name lands in the
            # right argument slot when unpickling.
            return (
                self.__class__,
                (
                    self._name,
                    self._repr,
                    self._bool_value,
                    self._module_name,
                ),
            )

Rejected Ideas

Use NotGiven = object()

This suffers from all of the drawbacks mentioned in the Rationale section.

Add a single new sentinel value, such as MISSING or Sentinel

Since such a value could be used for various things in various places, one could not always be confident that it would never be a valid value in some use cases.
On the other hand, a dedicated and distinct sentinel value can be used with confidence without needing to consider potential edge-cases.

Additionally, it is useful to be able to provide a meaningful name and repr for a sentinel value, specific to the context where it is used.

Finally, this was a very unpopular option in the poll[10], with only 12% of the votes voting for it.

Use the existing Ellipsis sentinel value

This is not the original intended use of Ellipsis, though it has become increasingly common to use it to define empty class or function blocks instead of using pass.

Also, similar to a potential new single sentinel value, Ellipsis can't be as confidently used in all cases, unlike a dedicated, distinct value.

Use a single-valued enum

The suggested idiom is:

    class NotGivenType(Enum):
        NotGiven = 'NotGiven'

    NotGiven = NotGivenType.NotGiven

Besides the excessive repetition, the repr is overly long: <NotGivenType.NotGiven: 'NotGiven'>. A shorter repr can be defined, at the expense of a bit more code and yet more repetition.

Finally, this option was the least popular among the nine options in the poll[11], being the only option to receive no votes.

A sentinel class decorator

The suggested idiom is:

    @sentinel(repr='<NotGiven>')
    class NotGivenType:
        pass

    NotGiven = NotGivenType()

While this allows for a very simple and clear implementation of the decorator, the idiom is too verbose, repetitive, and difficult to remember.

Using class objects

Since classes are inherently singletons, using a class as a sentinel value makes sense and allows for a simple implementation.

The simplest version of this is:

    class NotGiven:
        pass

To have a clear repr, one would need to use a meta-class:

    class NotGiven(metaclass=SentinelMeta):
        pass

... or a class decorator:

    @Sentinel
    class NotGiven:
        pass

Using classes this way is unusual and could be confusing. The intention of code would be hard to understand without comments. It would also cause such sentinels to have some unexpected and undesirable behavior, such as being callable.

Define a recommended "standard" idiom, without supplying an implementation

Most common existing idioms have significant drawbacks. So far, no idiom has been found that is clear and concise while avoiding these drawbacks. Also, in the poll[12] on this subject, the options for recommending an idiom were unpopular, with the highest-voted option being voted for by only 25% of the voters.

Additional Notes

- This PEP and the initial implementation are drafted in a dedicated GitHub repo[13].

- For sentinels defined in a class scope, to avoid potential name clashes, one should use the fully-qualified name of the variable in the module. Only the part of the name after the last period will be used for the default repr. For example:

      >>> class MyClass:
      ...     NotGiven = sentinel('MyClass.NotGiven')
      >>> MyClass.NotGiven
      <NotGiven>

- One should be careful when creating sentinels in a function or method, since sentinels with the same name created by code in the same module will be identical. If distinct sentinel objects are needed, make sure to use distinct names.

- There is no single desirable value for the "truthiness" of sentinels, i.e. their boolean value. It is sometimes useful for the boolean value to be True, and sometimes False. Of the built-in sentinels in Python, None evaluates to False, while Ellipsis (a.k.a. ...) evaluates to True. The desire for this to be set as needed came up in discussions as well.
- The boolean value of NotImplemented is True, but using it in a boolean context is deprecated since Python 3.9 (doing so generates a deprecation warning). This deprecation is due to issues specific to NotImplemented, as described in bpo-35712[14].

- To define multiple, related sentinel values, possibly with a defined ordering among them, one should instead use Enum or something similar.

- There was a discussion on the typing-sig mailing list[15] about the typing for these sentinels, where different options were discussed.

Open Issues

- Is adding a new stdlib module the right way to go? I could not find any existing module which seems like a logical place for this. However, adding new stdlib modules should be done judiciously, so perhaps choosing an existing module would be preferable even if it is not a perfect fit?

Footnotes

[1] Python-Dev mailing list: The repr of a sentinel
[2] Python-Dev mailing list: "The stdlib contains tons of sentinels"
[3] bpo-44123: Make function parameter sentinel values true singletons
[4] discuss.python.org Poll: Sentinel Values in the Stdlib
[5] Python-Dev mailing list: "The stdlib contains tons of sentinels"
[6] The "sentinels" package on PyPI
[7] The "sentinel" package on PyPI
[8] The "sentinels" package on PyPI
[9] Reference implementation at the taleinat/python-stdlib-sentinels GitHub repo
[10] discuss.python.org Poll: Sentinel Values in the Stdlib
[11] discuss.python.org Poll: Sentinel Values in the Stdlib
[12] discuss.python.org Poll: Sentinel Values in the Stdlib
[13] Reference implementation at the taleinat/python-stdlib-sentinels GitHub repo
[14] bpo-35712: Make NotImplemented unusable in boolean context
[15] Discussion thread about type signatures for these sentinels on the typing-sig mailing list

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 710
Title: Recording the provenance of installed packages
Author: Fridolín Pokorný <fridolin.pokorny at gmail.com>
Sponsor: Donald Stufft <[email protected]>
PEP-Delegate: Paul Moore <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-710-recording-the-provenance-of-installed-packages/25428
Status: Draft
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 27-Mar-2023
Post-History: 03-Dec-2021, 30-Jan-2023, 14-Mar-2023, 03-Apr-2023

Abstract

This PEP describes a way to record the provenance of installed Python distributions. The record is created by an installer and is available to users in the form of a JSON file provenance_url.json in the .dist-info directory. The mentioned JSON file captures additional metadata to allow recording a URL to a distribution package together with the installed distribution hash. This proposal is built on top of PEP 610 following its corresponding canonical PyPA spec <packaging:direct-url> and complements direct_url.json with provenance_url.json for when packages are identified by a name, and optionally a version.

Motivation

Installing a Python Project involves downloading a Distribution Package from a Package Index and extracting its content to an appropriate place. After the installation process is done, information about the release artifact used as well as its source is generally lost. However, there are use cases for keeping records of distributions used for installing packages and their provenance.

Python wheels can be built with different compiler flags or supporting different wheel tags. In both cases, users might get into a situation in which multiple wheels might be considered by installers (possibly from different package indexes), and immediately finding out which wheel file was actually used during the installation might be helpful. This way, developers can use information about wheels to debug issues, making sure the desired wheel was actually installed. Another use case could be tools reporting software installed, such as tools reporting an SBOM (Software Bill of Materials), that might give more accurate reports. Yet another use case could be reconstruction of the Python environment by pinning each installed package to a specific distribution artifact consumed from a Python package index.

Rationale

The motivation described in this PEP is an extension of the Recording the Direct URL Origin of installed distributions <packaging:direct-url> specification. In addition to recording provenance information for packages installed using a direct URL, installers should also do so for packages installed by name (and optionally version) from Python package indexes.

The idea described in this PEP originated in a tool called micropipenv that is used to install distribution packages <Distribution Package> in containerized environments (see the reported issue thoth-station/micropipenv#206). Currently, the assembled containerized application does not implicitly carry information about the provenance of installed distribution packages (unless these are installed from full URLs and recorded via direct_url.json). This requires container image suppliers to link container images with the corresponding build process, its configuration and the application source code for checking requirements files in cases when software present in containerized environments needs to be audited.
The subsequent discussion in the Discourse thread also brought up pip's new --report option, which can generate a detailed JSON report about the installation process. This option could help with the provenance problem this PEP approaches. Nevertheless, this option needs to be explicitly passed to pip to obtain the provenance information, and it includes additional metadata that might not be necessary for checking the provenance (such as the Python version requirements of each distribution package). Also, this option is specific to pip as of the writing of this PEP.

Note that the current spec for recording installed packages <packaging:recording-installed-packages> defines a RECORD file that records installed files, but not the distribution artifact from which these files were obtained. Auditing installed artifacts can be performed based on matching the entries listed in the RECORD file. However, this technique requires a pre-computed database of the files each artifact provides, or a comparison with the actual artifact content. Both approaches are relatively expensive and time-consuming operations, which could be eliminated with the proposed provenance_url.json file.

Recording provenance information for installed distribution packages, both those obtained from direct URLs and those installed by name/version from an index, can simplify auditing Python environments in general, beyond just the specific use case for containerized applications mentioned earlier. A community project, pip-audit, raised their possible interest in pypa/pip-audit#170.

Specification

The keywords "MUST", "MUST NOT", "REQUIRED", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.

The provenance_url.json file SHOULD be created in the .dist-info directory by installers when installing a Distribution Package specified by name (and optionally by Version Specifier). This file MUST NOT be created when installing a distribution package from a requirement specifying a direct URL reference (including a VCS URL).

Only one of the files provenance_url.json and direct_url.json (from the Recording the Direct URL Origin of installed distributions <packaging:direct-url> specification and the corresponding specification of the Direct URL Data Structure <packaging:direct-url-data-structure>) may be present in a given .dist-info directory; installers MUST NOT add both.

The provenance_url.json JSON file MUST be a dictionary, compliant with RFC 8259 and UTF-8 encoded. If present, it MUST contain exactly two keys. The first MUST be url, with type string. The second key MUST be archive_info, with a value defined below.

The value of the url key MUST be the URL from which the distribution package was downloaded. If a wheel is built from a source distribution, the url value MUST be the URL from which the source distribution was downloaded. If a wheel is downloaded and installed directly, the url field MUST be the URL from which the wheel was downloaded. As in the Direct URL Data Structure <packaging:direct-url-data-structure> specification, the url value MUST be stripped of any sensitive authentication information for security reasons.

The user:password section of the URL MAY however be composed of environment variables, matching the following regular expression:

    \$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?

Additionally, the user:password section of the URL MAY be a well-known, non-security sensitive string. A typical example is git in the case of a URL such as ssh://[email protected].
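For illustration only, here is a small Python sketch that checks a user:password section against the pattern quoted above; the helper name and sample strings are hypothetical and not part of this specification:

    import re

    # The environment-variable pattern quoted in the specification above.
    ENV_VAR_USERINFO = re.compile(r"\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?\Z")

    for userinfo in ("${PIP_USER}:${PIP_PASSWORD}", "${TOKEN}", "alice:hunter2"):
        print(userinfo, "->", bool(ENV_VAR_USERINFO.match(userinfo)))
    # ${PIP_USER}:${PIP_PASSWORD} -> True
    # ${TOKEN} -> True
    # alice:hunter2 -> False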
The value of archive_info MUST be a dictionary with a single key hashes. The value of hashes is a dictionary mapping hash function names to a hex-encoded digest of the file referenced by the url value. At least one hash MUST be recorded. Multiple hashes MAY be included, and it is up to the consumer to decide what to do with them (it may validate all of them, a subset of them, or none at all).

Each hash MUST be one of the single-argument hashes provided by py3.11:hashlib.algorithms_guaranteed, excluding sha1 and md5, which MUST NOT be used. As of Python 3.11, with shake_128 and shake_256 excluded for being multi-argument, the allowed set of hashes is:

    >>> import hashlib
    >>> sorted(hashlib.algorithms_guaranteed - {"shake_128", "shake_256", "sha1", "md5"})
    ['blake2b', 'blake2s', 'sha224', 'sha256', 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 'sha512']

Each hash MUST be referenced by the canonical name of the hash, always lower case. The hashes sha1 and md5 MUST NOT be present, due to the security limitations of these hash algorithms. Conversely, the hash sha256 SHOULD be included.

Installers that cache distribution packages from an index SHOULD keep information related to the cached distribution artifact, so that the provenance_url.json file can be created even when installing distribution packages from the installer's cache.

Backwards Compatibility

Following the packaging:recording-installed-packages specification, installers may keep additional installer-specific files in the .dist-info directory. To make sure this PEP does not cause any backwards compatibility issues, a comprehensive survey of installers and libraries found no current tools that are using a similarly-named file, nor any other major feasibility concerns.

The Wheel specification <packaging:binary-distribution-format> lists files that can be present in the .dist-info directory. None of these file names collide with the proposed provenance_url.json file from this PEP.

Presence of provenance_url.json in installers and libraries

A comprehensive survey of the existing installers, libraries, and dependency managers in the Python ecosystem analyzed the implications of adding support for provenance_url.json to each tool. In summary, no major backwards compatibility issues, conflicts or feasibility blockers were found as of the time of writing of this PEP. More details about the survey can be found in the Appendix: Survey of installers and libraries section.

Compatibility with direct_url.json

This proposal does not make any changes to the direct_url.json file described in PEP 610 and its corresponding canonical PyPA spec <packaging:direct-url>. The content of the provenance_url.json file was designed in a way that eventually allows installers to reuse some of the logic supporting direct_url.json when a direct URL refers to a source archive or a wheel. The main difference between the provenance_url.json and direct_url.json files is the set of mandatory keys and their values in the provenance_url.json file. This helps make sure consumers of the provenance_url.json file can rely on its content, if the file is present in the .dist-info directory.

Security Implications

One of the main security features of the provenance_url.json file is the ability to audit installed artifacts in Python environments. Tools can check which Python package indexes were used to install Python distribution packages <Distribution Package>, as well as the hash digests of their release artifacts.
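A minimal sketch of such an audit, assuming an environment whose installer already writes provenance_url.json; the allow-list of hosts is hypothetical and only serves to illustrate the idea:

    import json
    import sysconfig
    from pathlib import Path
    from urllib.parse import urlsplit

    # Hosts we expect release artifacts to come from (hypothetical allow-list).
    ALLOWED_HOSTS = {"files.pythonhosted.org"}

    site_packages = Path(sysconfig.get_paths()["purelib"])
    for provenance in site_packages.glob("*.dist-info/provenance_url.json"):
        record = json.loads(provenance.read_text(encoding="utf-8"))
        host = urlsplit(record["url"]).hostname
        if host not in ALLOWED_HOSTS:
            print(f"{provenance.parent.name}: unexpected source {record['url']}")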
As an example, consider the recent compromised dependency chain in the PyTorch incident. The PyTorch index provided a package named torchtriton. An attacker published torchtriton on PyPI, which ran a malicious binary. By checking the URL of the installed Python distribution stated in the provenance_url.json file, tools can automatically check the source of the installed Python distribution. In the case of the PyTorch incident, the URL of torchtriton should point to the PyTorch index, not PyPI. Tools can help identify such malicious Python distributions by checking the URL of each installed Python distribution.

A more exact check can also include the hash of the installed Python distribution stated in the provenance_url.json file. Such checks on hashes can be helpful for mirrored Python package indexes, where Python distributions are not distinguishable by their source URLs, making sure only desired Python package distributions are installed.

A malicious actor can intentionally adjust the content of provenance_url.json to possibly hide provenance information of the installed Python distribution. A security check which would uncover such malicious activity is beyond the scope of this PEP, as it would require monitoring actions on the filesystem and eventually reviewing user or file permissions.

How to Teach This

The provenance_url.json metadata file is intended for tools and is not directly visible to end users.

Examples

Examples of a valid provenance_url.json

A valid provenance_url.json listing multiple hashes:

    {
      "archive_info": {
        "hashes": {
          "blake2s": "fffeaf3d0bd71dc960ca2113af890a2f2198f2466f8cd58ce4b77c1fc54601ff",
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f",
          "sha3_256": "c856930e0f707266d30e5b48c667a843d45e79bb30473c464e92dfa158285eab",
          "sha512": "6bad5536c30a0b2d5905318a1592948929fbac9baf3bcf2e7faeaf90f445f82bc2b656d0a89070d8a6a9395761f4793c83187bd640c64b2656a112b5be41f73d"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

A valid provenance_url.json listing a single hash entry:

    {
      "archive_info": {
        "hashes": {
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

A valid provenance_url.json listing a source distribution which was used to build and install a wheel:

    {
      "archive_info": {
        "hashes": {
          "sha256": "8bfe29f17c10e2f2e619de8033a07a224058d96b3bfe2ed61777596f7ffd7fa9"
        }
      },
      "url": "https://files.pythonhosted.org/packages/1d/43/ad8ae671de795ec2eafd86515ef9842ab68455009d864c058d0c3dcf680d/micropipenv-0.0.1.tar.gz"
    }

Examples of an invalid provenance_url.json

The following example includes a hash key in the archive_info dictionary, as originally designed in the data structure documented in packaging:direct-url. The hash key MUST NOT be present, to prevent any possible confusion with hashes and the additional checks that would be required to keep hash values in sync.

    {
      "archive_info": {
        "hash": "sha256=236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f",
        "hashes": {
          "sha256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

Another example demonstrates an invalid hash name.
The referenced hash name does not correspond to the canonical hash names described in this PEP and in the Python docs under py3.11:hashlib.hash.name:

    {
      "archive_info": {
        "hashes": {
          "SHA-256": "236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"
        }
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

The last example demonstrates a provenance_url.json file with no hashes available for the downloaded artifact:

    {
      "archive_info": {
        "hashes": {}
      },
      "url": "https://files.pythonhosted.org/packages/07/51/2c0959c5adf988c44d9e1e0d940f5b074516ecc87e96b1af25f59de9ba38/pip-23.0.1-py3-none-any.whl"
    }

Example pip commands and their effect on provenance_url.json and direct_url.json

The following commands generate a direct_url.json file but do not generate a provenance_url.json file. These examples follow the examples from the Direct URL Data Structure <packaging:direct-url-data-structure> specification:

- pip install https://example.com/app-1.0.tgz
- pip install https://example.com/app-1.0.whl
- pip install "git+https://example.com/repo/app.git#egg=app&subdirectory=setup"
- pip install ./app
- pip install file:///home/user/app
- pip install --editable "git+https://example.com/repo/app.git#egg=app&subdirectory=setup" (in which case, url will be the local directory where the git repository has been cloned to, and dir_info will be present with "editable": true and no vcs_info will be set)
- pip install -e ./app

Commands that generate a provenance_url.json file but do not generate a direct_url.json file:

- pip install app
- pip install app~=2.2.0
- pip install app --no-index --find-links "https://example.com/"

This behaviour can be tested using changes to pip implemented in the PR pypa/pip#11865.

Reference Implementation

A proof-of-concept for creating the provenance_url.json metadata file when installing a Python Distribution Package is available in the PR to pip, pypa/pip#11865. It reuses the already available implementation for the direct URL data structure <packaging:direct-url-data-structure> to provide the provenance_url.json metadata file for cases when direct_url.json is not created.

A reference implementation for supporting the provenance_url.json file in PDM is available in pdm-project/pdm#3013.

A prototype called pip-preserve was developed to demonstrate the creation of requirements.txt files considering the direct_url.json and provenance_url.json metadata files. This tool mimics the pip freeze functionality, but the listing of installed packages also includes the hashes of the Python distribution artifacts.

To further support this proposal, pip-sbom demonstrates the creation of an SBOM in the SPDX format. The tool uses information stored in the provenance_url.json file.

Rejected Ideas

Naming the file direct_url.json instead of provenance_url.json

To preserve backwards compatibility with the Recording the Direct URL Origin of installed distributions <packaging:direct-url> specification, the file cannot be named direct_url.json, as per the text of that specification:

    This file MUST NOT be created when installing a distribution from an other type of requirement (i.e. name plus version specifier).

Such a change might introduce backwards compatibility issues for consumers of direct_url.json who rely on its presence only when distributions are installed using a direct URL reference.
Deprecating direct_url.json and using only provenance_url.json

The file direct_url.json is already well established by the Direct URL Data Structure <packaging:direct-url-data-structure> specification and is already used by installers. For example, pip uses direct_url.json to report a direct URL reference on pip freeze. Deprecating direct_url.json would require additional changes to the pip freeze implementation in pip (see PR fridex/pip#2) and could introduce backwards compatibility issues for already existing direct_url.json consumers.

Keeping the hash key in the archive_info dictionary

The Direct URL Data Structure <packaging:direct-url-data-structure> specification discusses the possibility of including the hash key alongside the hashes key in the archive_info dictionary. This PEP explicitly does not include the hash key in the provenance_url.json file and allows only the hashes key to be present. By doing so, we eliminate possible redundancy in the file, possible confusion, and any additional checks that would need to be done to make sure the hashes are in sync.

Allowing no hashes stated

For cases when a wheel file is installed from pip's cache and was built using an older version of pip, pip does not record hashes of the downloaded source distributions. As we do not have hashes of these downloaded source distributions, the hashes key in the provenance_url.json file would not contain any entries. In such cases, pip does not create any provenance_url.json file, as the provenance information is not complete. Consumers are encouraged to rebuild wheels with a newer version of pip in these cases.

Making the hashes key optional

PEP 610 and its corresponding canonical PyPA spec <packaging:direct-url> recommend including the hashes key of archive_info in the direct_url.json file, but it is not required (per the RFC 2119 language):

    A hashes key SHOULD be present as a dictionary mapping a hash name to a hex encoded digest of the file.

This PEP requires the hashes key to be included in archive_info in the provenance_url.json file if that file is created; per this PEP:

    The value of archive_info MUST be a dictionary with a single key hashes.

By doing so, consumers of provenance_url.json can check artifact digests when the provenance_url.json file is created by installers.

Storing index URL

A possibility was raised of storing the index URL as part of the file content. This index URL would represent the index configured in pip's configuration or specified using the --index-url or --extra-index-url options. Storing this information was considered confusing, especially when using other installation options like --find-links. Since the actual index URL is not strictly bound to the location from which the wheel file was downloaded, we decided not to store the index URL in the provenance_url.json file.

Open Issues

Availability of the provenance_url.json file in Conda

We would like to get feedback on the provenance_url.json file from the Conda maintainers. It is not clear whether Conda would like to adopt the provenance_url.json file. Conda already stores provenance-related information (similar to the provenance information proposed in this PEP) in JSON files located in the conda-meta directory, following its actions during installation.

Using provenance_url.json in downstream installers

The proposed provenance_url.json file was meant to be adopted primarily by Python installers.
Other installers, such as APT or DNF, might record the provenance of the installed downstream Python distributions in their own way specific to downstream package management. The proposed file is not expected to be created by these downstream package installers, and thus they were intentionally left out of this PEP. However, any input from developers or maintainers of these installers is valuable to possibly enrich the provenance_url.json file with information that would help in some way.

Appendix: Survey of installers and libraries

pip

The function from pip's internal API responsible for installing wheels, named _install_wheel, does not store any provenance_url.json file in the .dist-info directory. Additionally, a prototype introducing the mentioned file to pip in pypa/pip#11865 demonstrates incorporating logic for handling the provenance_url.json file in pip's source code.

As pip is used by some of the tools mentioned below to install Python package distributions, findings for pip apply to these tools as well, since pip does not allow parametrizing the creation of files in the .dist-info directory in its internal API. Most of the tools mentioned below that use pip invoke pip as a subprocess, which has no effect on the eventual presence of the provenance_url.json file in the .dist-info directory.

distlib

distlib implements low-level functionality to manipulate the dist-info directory. The database of installed distributions does not use any file named provenance_url.json, based on distlib's source code.

Pipenv

Pipenv uses pip to install Python package distributions. No additional logic was identified that would cause backwards compatibility issues when introducing the provenance_url.json file in the .dist-info directory.

installer

installer does not create a provenance_url.json file explicitly. Nevertheless, as per the Recording Installed Projects <packaging:recording-installed-packages> specification, installer allows passing the additional_metadata argument to create a file in the .dist-info directory -- see the source code. To avoid any backwards compatibility issues, any library or tool using installer must not request creating the provenance_url.json file using the mentioned additional_metadata argument.

Poetry

The installation logic in Poetry depends on the installer.modern-installer configuration option (see docs). When the installer.modern-installer configuration option is set to false, Poetry uses pip to install Python package distributions. On the other hand, when the installer.modern-installer configuration option is set to true, Poetry uses installer to install Python package distributions. As can be seen from the linked sources, no additional metadata file named provenance_url.json is passed that would cause compatibility issues with this PEP.

Conda

Conda does not create any provenance_url.json file when Python package distributions are installed.

Hatch

Hatch uses pip to install project dependencies.

micropipenv

As micropipenv is a wrapper on top of pip, it uses pip to install Python distributions, for lock files as well as for requirements files.

Thamos

Thamos uses micropipenv to install Python package distributions, hence any findings for micropipenv apply to Thamos.

PDM

PDM uses installer to install binary distributions. The only additional metadata file it eventually creates in the .dist-info directory is the REFER_TO file.

uv

uv is written in Rust and uses its own installation logic when installing wheels.
It does not create any additional files in the .dist-info directory that would collide with the provenance_url.json file naming.

References

Acknowledgements

Thanks to Dustin Ingram, Brett Cannon, and Paul Moore for the initial discussion in which this idea originated.

Thanks to Donald Stufft, Ofek Lev, and Trishank Kuppusamy for early feedback and support to work on this PEP.

Thanks to Gregory P. Smith, Stéphane Bidoul, and C.A.M. Gerlach for reviewing this PEP and providing valuable suggestions.

Thanks to Seth Michael Larson for providing valuable suggestions and for the proposed pip-sbom prototype.

Thanks to Stéphane Bidoul and Chris Jerdonek for PEP 610, and the related Recording the Direct URL Origin of installed distributions <packaging:direct-url> and Direct URL Data Structure <packaging:direct-url-data-structure> specifications.

Thanks to Frost Ming for raising a possible concern around storing the index URL in the provenance_url.json file, and for the initial PEP 710 support in PDM.

Last, but not least, thanks to Donald Stufft for sponsoring this PEP.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 604
Title: Allow writing union types as X | Y
Author: Philippe PRADOS <[email protected]>, Maggie Moss <[email protected]>
Sponsor: Chris Angelico <[email protected]>
BDFL-Delegate: Guido van Rossum <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Typing
Created: 28-Aug-2019
Python-Version: 3.10
Post-History: 28-Aug-2019, 05-Aug-2020

python:types-union

Abstract

This PEP proposes overloading the | operator on types to allow writing Union[X, Y] as X | Y, and allowing it to appear in isinstance and issubclass calls.

Motivation

PEP 484 and PEP 526 propose a generic syntax to add typing to variables, parameters and function returns. PEP 585 proposes to expose parameters to generics at runtime <585#parameters-to-generics-are-available-at-runtime>. Mypy[1] accepts a syntax which looks like:

    annotation: name_type
    name_type: NAME (args)?
    args: '[' paramslist ']'
    paramslist: annotation (',' annotation)* [',']

To describe a disjunction (union type), the user must use Union[X, Y]. The verbosity of this syntax does not help with type adoption.

Proposal

Inspired by Scala[2] and Pike[3], this proposal adds the operator type.__or__(). With this new operator, it is possible to write int | str instead of Union[int, str]. In addition to annotations, the result of this expression would then be valid in isinstance() and issubclass():

    isinstance(5, int | str)
    issubclass(bool, int | float)

We will also be able to write t | None or None | t instead of Optional[t]:

    isinstance(None, int | None)
    isinstance(42, None | int)

Specification

The new union syntax should be accepted for function, variable and parameter annotations.

Simplified Syntax

    # Instead of
    # def f(list: List[Union[int, str]], param: Optional[int]) -> Union[float, str]
    def f(list: List[int | str], param: int | None) -> float | str:
        pass

    f([1, "abc"], None)

    # Instead of typing.List[typing.Union[str, int]]
    typing.List[str | int]
    list[str | int]

    # Instead of typing.Dict[str, typing.Union[int, float]]
    typing.Dict[str, int | float]
    dict[str, int | float]

The existing typing.Union and | syntax should be equivalent.

    int | str == typing.Union[int, str]
    typing.Union[int, int] == int
    int | int == int

The order of the items in the Union should not matter for equality.

    (int | str) == (str | int)
    (int | str | float) == typing.Union[str, float, int]

Optional values should be equivalent to the new union syntax.

    None | t == typing.Optional[t]

A new Union.__repr__() method should be implemented.

    str(int | list[str])
    # int | list[str]

    str(int | int)
    # int

isinstance and issubclass

The new syntax should be accepted for calls to isinstance and issubclass as long as the Union items are valid arguments to isinstance and issubclass themselves.

    # valid
    isinstance("", int | str)

    # invalid
    isinstance(2, list[int])  # TypeError: isinstance() argument 2 cannot be a parameterized generic
    isinstance(1, int | list[int])

    # valid
    issubclass(bool, int | float)

    # invalid
    issubclass(bool, bool | list[int])

Incompatible changes

In some situations, some exceptions will not be raised as expected.

If a metaclass implements the __or__ operator, it will override this:

    >>> class M(type):
    ...     def __or__(self, other): return "Hello"
    ...
    >>> class C(metaclass=M): pass
    ...
    >>> C | int
    'Hello'
    >>> int | C
    typing.Union[int, __main__.C]
    >>> Union[C, int]
    typing.Union[__main__.C, int]

Objections and responses

For more details about discussions, see links below:

- Discussion in python-ideas
- Discussion in typing-sig
1. Add a new operator for Union[type1, type2]?

   PROS:

   - This syntax can be more readable, and is similar to other languages (Scala, ...)
   - At runtime, int|str might return a simple object in 3.10, rather than everything that you'd need to grab from importing typing

   CONS:

   - Adding this operator introduces a dependency between typing and builtins
   - Breaks the backport (in that typing can easily be backported but core types can't)
   - If Python itself doesn't have to be changed, we'd still need to implement it in mypy, Pyre, PyCharm, Pytype, and who knows what else (it's a minor change, see "Reference Implementation")

2. Change only PEP 484 (Type hints) to accept the syntax type1 | type2?

   PEP 563 (Postponed Evaluation of Annotations) is enough to accept this proposition, if we accept not being compatible with the dynamic evaluation of annotations (eval()).

       >>> from __future__ import annotations
       >>> def foo() -> int | str: pass
       ...
       >>> eval(foo.__annotations__['return'])
       Traceback (most recent call last):
         File "<stdin>", line 1, in <module>
         File "<string>", line 1, in <module>
       TypeError: unsupported operand type(s) for |: 'type' and 'type'

3. Extend isinstance() and issubclass() to accept Union?

       isinstance(x, str | int) ==> "is x an instance of str or int"

   PROS:

   - If they were permitted, then instance checking could use an extremely clean-looking notation

   CONS:

   - Must migrate all of the typing module into builtins

Reference Implementation

A new built-in Union type must be implemented to hold the return value of t1 | t2, and it must be supported by isinstance() and issubclass(). This type can be placed in the types module. Interoperability between types.Union and typing.Union must be provided.

Once the Python language is extended, mypy[4] and other type checkers will need to be updated to accept this new syntax.

- A proposed implementation for cpython is here.
- A proposed implementation for mypy is here.

References

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

[1] mypy http://mypy-lang.org/
[2] Scala Union Types https://dotty.epfl.ch/docs/reference/new-types/union-types.html
[3] Pike http://pike.lysator.liu.se/docs/man/chapter_3.html#3.5
[4] mypy http://mypy-lang.org/
PEP: 292
Title: Simpler String Substitutions
Author: Barry Warsaw <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Jun-2002
Python-Version: 2.4
Post-History: 18-Jun-2002, 23-Mar-2004, 22-Aug-2004
Replaces: 215

Abstract

This PEP describes a simpler string substitution feature, also known as string interpolation. This PEP is "simpler" in two respects:

1. Python's current string substitution feature (i.e. %-substitution) is complicated and error prone. This PEP is simpler at the cost of some expressiveness.
2. PEP 215 proposed an alternative string interpolation feature, introducing a new $ string prefix. PEP 292 is simpler than this because it involves no syntax changes and has much simpler rules for what substitutions can occur in the string.

Rationale

Python currently supports a string substitution syntax based on C's printf() '%' formatting character[1]. While quite rich, %-formatting codes are also error prone, even for experienced Python programmers. A common mistake is to leave off the trailing format character, e.g. the 's' in "%(name)s". In addition, the rules for what can follow a % sign are fairly complex, while the usual application rarely needs such complexity. Most scripts need to do some string interpolation, but most of those use simple 'stringification' formats, i.e. %s or %(name)s. This form should be made simpler and less error prone.

A Simpler Proposal

We propose the addition of a new class, called Template, which will live in the string module. The Template class supports new rules for string substitution; its value contains placeholders, introduced with the $ character. The following rules for $-placeholders apply:

1. $$ is an escape; it is replaced with a single $
2. $identifier names a substitution placeholder matching a mapping key of "identifier". By default, "identifier" must spell a Python identifier as defined in[2]. The first non-identifier character after the $ character terminates this placeholder specification.
3. ${identifier} is equivalent to $identifier. It is required when valid identifier characters follow the placeholder but are not part of the placeholder, e.g. "${noun}ification".

If the $ character appears at the end of the line, or is followed by any other character than those described above, a ValueError will be raised at interpolation time. Values in the mapping are converted automatically to strings.

No other characters have special meaning; however, it is possible to derive from the Template class to define different substitution rules. For example, a derived class could allow for periods in the placeholder (e.g. to support a kind of dynamic namespace and attribute path lookup), or could define a delimiter character other than $.

Once the Template has been created, substitutions can be performed by calling one of two methods:

- substitute(). This method returns a new string which results when the values of a mapping are substituted for the placeholders in the Template. If there are placeholders which are not present in the mapping, a KeyError will be raised.
- safe_substitute(). This is similar to the substitute() method, except that KeyErrors are never raised (due to placeholders missing from the mapping). When a placeholder is missing, the original placeholder will appear in the resulting string.
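As an illustration of the subclassing hook described above, here is a minimal sketch built on the delimiter class attribute of the string.Template implementation that eventually shipped; the subclass name is hypothetical:

    from string import Template

    class PercentTemplate(Template):
        # Override the delimiter; '%%' then becomes the escape sequence.
        delimiter = '%'

    t = PercentTemplate('%who likes %{what}ification')
    print(t.substitute(who='tim', what='web'))  # tim likes webification

Overriding delimiter changes both the placeholder prefix and the escape in one place, which is the kind of derived-class customization the proposal anticipates.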
Here are some examples:

    >>> from string import Template
    >>> s = Template('${name} was born in ${country}')
    >>> print s.substitute(name='Guido', country='the Netherlands')
    Guido was born in the Netherlands
    >>> print s.substitute(name='Guido')
    Traceback (most recent call last):
    [...]
    KeyError: 'country'
    >>> print s.safe_substitute(name='Guido')
    Guido was born in ${country}

The signature of substitute() and safe_substitute() allows for passing the mapping of placeholders to values, either as a single dictionary-like object in the first positional argument, or as keyword arguments as shown above. The exact details and signatures of these two methods are reserved for the standard library documentation.

Why $ and Braces?

The BDFL said it best[3]: "The $ means "substitution" in so many languages besides Perl that I wonder where you've been. [...] We're copying this from the shell."

Thus the substitution rules are chosen because of the similarity with so many other languages. This makes the substitution rules easier to teach, learn, and remember.

Comparison to PEP 215

PEP 215 describes an alternate proposal for string interpolation. Unlike that PEP, this one does not propose any new syntax for Python. All the proposed new features are embodied in a new library module.

PEP 215 proposes a new string prefix representation such as $"" which signals to Python that a new type of string is present. $-strings would have to interact with the existing r-prefixes and u-prefixes, essentially doubling the number of string prefix combinations.

PEP 215 also allows for arbitrary Python expressions inside the $-strings, so that you could do things like:

    import sys
    print $"sys = $sys, sys = $sys.modules['sys']"

which would return:

    sys = <module 'sys' (built-in)>, sys = <module 'sys' (built-in)>

It's generally accepted that the rules in PEP 215 are safe in the sense that they introduce no new security issues (see PEP 215, "Security Issues" for details). However, the rules are still quite complex, and make it more difficult to see the substitution placeholder in the original $-string.

The interesting thing is that the Template class defined in this PEP is designed for inheritance and, with a little extra work, it's possible to support PEP 215's functionality using existing Python syntax. For example, one could define subclasses of Template and dict that allowed for a more complex placeholder syntax and a mapping that evaluated those placeholders.

Internationalization

The implementation supports internationalization by recording the original template string in the Template instance's template attribute. This attribute would serve as the lookup key in a gettext-based catalog. It is up to the application to turn the resulting string back into a Template for substitution.

However, the Template class was designed to work more intuitively in an internationalized application, by supporting the mixing-in of Template and unicode subclasses. Thus an internationalized application could create an application-specific subclass, multiply inheriting from Template and unicode, and use instances of that subclass as the gettext catalog keys. Further, the subclass could alias the special __mod__() method to either .substitute() or .safe_substitute() to provide a more traditional string/unicode-like %-operator substitution syntax.

Reference Implementation

The implementation[4] has been committed to the Python 2.4 source tree.

References

Copyright

This document has been placed in the public domain.
[1] String Formatting Operations https://docs.python.org/release/2.6/library/stdtypes.html#string-formatting-operations
[2] Identifiers and Keywords https://docs.python.org/release/2.6/reference/lexical_analysis.html#identifiers-and-keywords
[3] https://mail.python.org/pipermail/python-dev/2002-June/025652.html
[4] Reference Implementation http://sourceforge.net/tracker/index.php?func=detail&aid=1014055&group_id=5470&atid=305470
PEP: 756
Title: Add PyUnicode_Export() and PyUnicode_Import() C functions
Author: Victor Stinner <[email protected]>
PEP-Delegate: C API Working Group
Discussions-To: https://discuss.python.org/t/63891
Status: Draft
Type: Standards Track
Created: 13-Sep-2024
Python-Version: 3.14
Post-History: 14-Sep-2024

Abstract

Add functions to the limited C API version 3.14:

- PyUnicode_Export(): export a Python str object as a Py_buffer view.
- PyUnicode_Import(): import a Python str object.

On CPython, PyUnicode_Export() has an O(1) complexity: no memory is copied and no conversion is done.

Rationale

PEP 393

PEP 393 "Flexible String Representation" changed string internals in Python 3.3 to use three formats:

- PyUnicode_1BYTE_KIND: Unicode range [U+0000; U+00ff], UCS-1, 1 byte/character.
- PyUnicode_2BYTE_KIND: Unicode range [U+0000; U+ffff], UCS-2, 2 bytes/character.
- PyUnicode_4BYTE_KIND: Unicode range [U+0000; U+10ffff], UCS-4, 4 bytes/character.

A Python str object must always use the most compact format. For example, a string which only contains ASCII characters must use the UCS-1 format.

The PyUnicode_KIND() function can be used to know the format used by a string. One of the following functions can be used to access data:

- PyUnicode_1BYTE_DATA() for PyUnicode_1BYTE_KIND.
- PyUnicode_2BYTE_DATA() for PyUnicode_2BYTE_KIND.
- PyUnicode_4BYTE_DATA() for PyUnicode_4BYTE_KIND.

To get the best performance, a C extension should have 3 code paths, one for each of these 3 native string formats.

Limited C API

PEP 393 functions such as PyUnicode_KIND() and PyUnicode_1BYTE_DATA() are excluded from the limited C API. It's not possible to write code specialized for UCS formats. A C extension using the limited C API can only use less efficient code paths and string formats. For example, the MarkupSafe project has a C extension specialized for UCS formats for best performance, and so cannot use the limited C API.

Specification

API

Add the following API to the limited C API version 3.14:

    int32_t PyUnicode_Export(
        PyObject *unicode,
        int32_t requested_formats,
        Py_buffer *view);
    PyObject* PyUnicode_Import(
        const void *data,
        Py_ssize_t nbytes,
        int32_t format);

    #define PyUnicode_FORMAT_UCS1  0x01   // Py_UCS1*
    #define PyUnicode_FORMAT_UCS2  0x02   // Py_UCS2*
    #define PyUnicode_FORMAT_UCS4  0x04   // Py_UCS4*
    #define PyUnicode_FORMAT_UTF8  0x08   // char*
    #define PyUnicode_FORMAT_ASCII 0x10   // char* (ASCII string)

The int32_t type is used instead of int to have a well-defined type size and not depend on the platform or the compiler. See Avoid C-specific Types for the longer rationale.

PyUnicode_Export()

API:

    int32_t PyUnicode_Export(
        PyObject *unicode,
        int32_t requested_formats,
        Py_buffer *view)

Export the contents of the unicode string in one of the requested_formats.

- On success, fill view, and return a format (greater than 0).
- On error, set an exception, and return -1. view is left unchanged.

After a successful call to PyUnicode_Export(), the view buffer must be released by PyBuffer_Release(). The contents of the buffer are valid until they are released.

The buffer is read-only and must not be modified.

The view->len member must be used to get the string length. The buffer should end with a trailing NUL character, but it's not recommended to rely on that, because of embedded NUL characters.

unicode and view must not be NULL.
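Which of the formats listed next a CPython export reports follows from the PEP 393 rule above that str always uses the most compact representation. As a rough illustration, here is a Python sketch; the helper is hypothetical and not part of the proposed C API:

    def expected_ucs_format(s: str) -> str:
        """Most compact PEP 393 format for s, hence the UCS format
        PyUnicode_Export() would report on CPython."""
        highest = max(map(ord, s), default=0)
        if highest <= 0xFF:
            return "PyUnicode_FORMAT_UCS1"
        if highest <= 0xFFFF:
            return "PyUnicode_FORMAT_UCS2"
        return "PyUnicode_FORMAT_UCS4"

    print(expected_ucs_format("abc"))         # PyUnicode_FORMAT_UCS1
    print(expected_ucs_format("caf\u00e9"))   # PyUnicode_FORMAT_UCS1
    print(expected_ucs_format("\u20ac"))      # PyUnicode_FORMAT_UCS2 (euro sign)
    print(expected_ucs_format("\U0001F40D"))  # PyUnicode_FORMAT_UCS4 (snake emoji)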
Available formats:

    Constant Identifier       Value   Description
    -----------------------   -----   -------------------------
    PyUnicode_FORMAT_UCS1     0x01    UCS-1 string (Py_UCS1*)
    PyUnicode_FORMAT_UCS2     0x02    UCS-2 string (Py_UCS2*)
    PyUnicode_FORMAT_UCS4     0x04    UCS-4 string (Py_UCS4*)
    PyUnicode_FORMAT_UTF8     0x08    UTF-8 string (char*)
    PyUnicode_FORMAT_ASCII    0x10    ASCII string (Py_UCS1*)

UCS-2 and UCS-4 use the native byte order.

requested_formats can be a single format or a bitwise combination of the formats in the table above. On success, the returned format will be set to a single one of the requested formats. Note that future versions of Python may introduce additional formats.

No memory is copied and no conversion is done.

Export complexity

On CPython, an export has a complexity of O(1): no memory is copied and no conversion is done.

To get the best performance on CPython and PyPy, it's recommended to support these 4 formats:

    (PyUnicode_FORMAT_UCS1 \
     | PyUnicode_FORMAT_UCS2 \
     | PyUnicode_FORMAT_UCS4 \
     | PyUnicode_FORMAT_UTF8)

PyPy uses UTF-8 natively, and so the PyUnicode_FORMAT_UTF8 format is recommended. It requires a memory copy, since PyPy str objects can be moved in memory (PyPy uses a moving garbage collector).

Py_buffer format and item size

Py_buffer uses the following format and item size depending on the export format:

    Export format             Buffer format   Item size
    -----------------------   -------------   ---------
    PyUnicode_FORMAT_UCS1     "B"             1 byte
    PyUnicode_FORMAT_UCS2     "=H"            2 bytes
    PyUnicode_FORMAT_UCS4     "=I"            4 bytes
    PyUnicode_FORMAT_UTF8     "B"             1 byte
    PyUnicode_FORMAT_ASCII    "B"             1 byte

PyUnicode_Import()

API:

    PyObject* PyUnicode_Import(
        const void *data,
        Py_ssize_t nbytes,
        int32_t format)

Create a Unicode string object from a buffer in a supported format.

- Return a reference to a new string object on success.
- Set an exception and return NULL on error.

data must not be NULL. nbytes must be positive or zero.

See PyUnicode_Export() for the available formats.

UTF-8 format

CPython 3.14 doesn't use the UTF-8 format internally and doesn't support exporting a string as UTF-8. The PyUnicode_AsUTF8AndSize() function can be used instead. The PyUnicode_FORMAT_UTF8 format is provided for compatibility with alternate implementations which may use UTF-8 natively for strings.

ASCII format

When the PyUnicode_FORMAT_ASCII format is requested for export, the PyUnicode_FORMAT_UCS1 export format is used for ASCII strings. The PyUnicode_FORMAT_ASCII format is mostly useful for PyUnicode_Import(), to validate that a string only contains ASCII characters.

Surrogate characters and embedded NUL characters

Surrogate characters are allowed: they can be imported and exported. Embedded NUL characters are allowed: they can be imported and exported.

Implementation

https://github.com/python/cpython/pull/123738

Backwards Compatibility

There is no impact on backward compatibility; only new C API functions are added.

Usage of PEP 393 C APIs

A code search on the PyPI top 7,500 projects (in March 2024) shows that there are many projects importing and exporting UCS formats with the regular C API.
PyUnicode_FromKindAndData()

25 projects call PyUnicode_FromKindAndData():

- Cython (3.0.9)
- Levenshtein (0.25.0)
- PyICU (2.12)
- PyICU-binary (2.7.4)
- PyQt5 (5.15.10)
- PyQt6 (6.6.1)
- aiocsv (1.3.1)
- asyncpg (0.29.0)
- biopython (1.83)
- catboost (1.2.3)
- cffi (1.16.0)
- mojimoji (0.0.13)
- mwparserfromhell (0.6.6)
- numba (0.59.0)
- numpy (1.26.4)
- orjson (3.9.15)
- pemja (0.4.1)
- pyahocorasick (2.0.0)
- pyjson5 (1.6.6)
- rapidfuzz (3.6.2)
- regex (2023.12.25)
- srsly (2.4.8)
- tokenizers (0.15.2)
- ujson (5.9.0)
- unicodedata2 (15.1.0)

PyUnicode_4BYTE_DATA()

21 projects call PyUnicode_2BYTE_DATA() and/or PyUnicode_4BYTE_DATA():

- Cython (3.0.9)
- MarkupSafe (2.1.5)
- Nuitka (2.1.2)
- PyICU (2.12)
- PyICU-binary (2.7.4)
- PyQt5_sip (12.13.0)
- PyQt6_sip (13.6.0)
- biopython (1.83)
- catboost (1.2.3)
- cement (3.0.10)
- cffi (1.16.0)
- duckdb (0.10.0)
- mypy (1.9.0)
- numpy (1.26.4)
- orjson (3.9.15)
- pemja (0.4.1)
- pyahocorasick (2.0.0)
- pyjson5 (1.6.6)
- pyobjc-core (10.2)
- sip (6.8.3)
- wxPython (4.2.1)

Rejected Ideas

Reject embedded NUL characters and require a trailing NUL character

In C, it's convenient to have a trailing NUL character. For example, the for (; *str != 0; str++) loop can be used to iterate over characters, and strlen() can be used to get a string length.

The problem is that a Python str object can embed NUL characters. Example: "ab\0c". If a string contains an embedded NUL character, code relying on the NUL character to find the string end truncates the string. It can lead to bugs, or even security vulnerabilities. See a previous discussion in the issue Change PyUnicode_AsUTF8() to return NULL on embedded null characters.

Rejecting embedded NUL characters would require scanning the string, which has an O(n) complexity.

Reject surrogate characters

Surrogate characters are characters in the Unicode range [U+D800; U+DFFF]. They are disallowed by UTF codecs such as UTF-8. A Python str object can contain arbitrary lone surrogate characters. Example: "\uDC80".

Rejecting surrogate characters would prevent exporting a string which contains such a character. It can be surprising and annoying, since the PyUnicode_Export() caller doesn't control the string contents. Allowing surrogate characters makes it possible to export any string, and so avoids this issue. For example, the UTF-8 codec can be used with the surrogatepass error handler to encode and decode surrogate characters.

Conversions on demand

It would be convenient to convert formats on demand. For example, convert UCS-1 and UCS-2 to UCS-4 if an export to only UCS-4 is requested. The problem is that most users expect an export to require no memory copy and no conversion: an O(1) complexity. It is better to have an API where all operations have an O(1) complexity.

Export to UTF-8

CPython 3.14 has a cache to encode a string to UTF-8. It is tempting to allow exporting to UTF-8. The problem is that the UTF-8 cache doesn't support surrogate characters. An export is expected to provide the whole string content, including embedded NUL characters and surrogate characters. To export surrogate characters, a different code path using the surrogatepass error handler is needed, and each export operation has to allocate a temporary buffer: O(n) complexity. An export is expected to have an O(1) complexity, so the idea to export UTF-8 in CPython was abandoned.
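To make the surrogatepass behavior referenced above concrete, here is a short Python sketch of the standard codec behavior, shown only as an illustration:

    # A lone surrogate round-trips through UTF-8 with the
    # surrogatepass error handler, while strict UTF-8 rejects it.
    s = "\udc80"
    data = s.encode("utf-8", "surrogatepass")
    assert data == b"\xed\xb2\x80"
    assert data.decode("utf-8", "surrogatepass") == s
    try:
        s.encode("utf-8")
    except UnicodeEncodeError as exc:
        print("strict UTF-8 rejects lone surrogates:", exc.reason)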
Discussions

- https://discuss.python.org/t/63891
- https://github.com/capi-workgroup/decisions/issues/33
- https://github.com/python/cpython/issues/119609

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 236
Title: Back to the __future__
Version: $Revision$
Last-Modified: $Date$
Author: Tim Peters <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 26-Feb-2001
Python-Version: 2.1
Post-History: 26-Feb-2001

Motivation

From time to time, Python makes an incompatible change to the advertised semantics of core language constructs, or changes their accidental (implementation-dependent) behavior in some way. While this is never done capriciously, and is always done with the aim of improving the language over the long term, over the short term it's contentious and disrupting. PEP 5, Guidelines for Language Evolution, suggests ways to ease the pain, and this PEP introduces some machinery in support of that. PEP 227, Statically Nested Scopes, is the first application, and will be used as an example here.

Intent

[Note: This is policy, and so should eventually move into PEP 5]

When an incompatible change to core language syntax or semantics is being made:

1. The release C that introduces the change does not change the syntax or semantics by default.
2. A future release R is identified in which the new syntax or semantics will be enforced.
3. The mechanisms described in PEP 230, Warning Framework, are used to generate warnings, whenever possible, about constructs or operations whose meaning may[1] change in release R.
4. The new future_statement (see below) can be explicitly included in a module M to request that the code in module M use the new syntax or semantics in the current release C.

So old code continues to work by default, for at least one release, although it may start to generate new warning messages. Migration to the new syntax or semantics can proceed during that time, using the future_statement to make modules containing it act as if the new syntax or semantics were already being enforced.

Note that there is no need to involve the future_statement machinery in new features unless they can break existing code; fully backward-compatible additions can -- and should -- be introduced without a corresponding future_statement.

Syntax

A future_statement is simply a from/import statement using the reserved module name __future__:

    future_statement: "from" "__future__" "import" feature ["as" name]
                      ("," feature ["as" name])*

    feature: identifier
    name: identifier

In addition, all future_statements must appear near the top of the module. The only lines that can appear before a future_statement are:

- The module docstring (if any).
- Comments.
- Blank lines.
- Other future_statements.

Example:

    """This is a module docstring."""

    # This is a comment, preceded by a blank line and followed by
    # a future_statement.
    from __future__ import nested_scopes

    from math import sin
    from __future__ import alabaster_weenoblobs  # compile-time error!
    # That was an error because preceded by a non-future_statement.

Semantics

A future_statement is recognized and treated specially at compile time: changes to the semantics of core constructs are often implemented by generating different code. It may even be the case that a new feature introduces new incompatible syntax (such as a new reserved word), in which case the compiler may need to parse the module differently. Such decisions cannot be pushed off until runtime.

For any given release, the compiler knows which feature names have been defined, and raises a compile-time error if a future_statement contains a feature not known to it[2].
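For instance, a sketch of how that compile-time check surfaces on a modern CPython; the exact message wording is the current implementation's, not something mandated by this PEP:

    >>> compile("from __future__ import alabaster_weenoblobs", "<test>", "exec")
    Traceback (most recent call last):
      ...
    SyntaxError: future feature alabaster_weenoblobs is not defined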
The direct runtime semantics are the same as for any import statement: there is a standard module __future__.py, described later, and it will be imported in the usual way at the time the future_statement is executed. The interesting runtime semantics depend on the specific feature(s) "imported" by the future_statement(s) appearing in the module.

Note that there is nothing special about the statement:

    import __future__ [as name]

That is not a future_statement; it's an ordinary import statement, with no special semantics or syntax restrictions.

Example

Consider this code, in file scope.py:

    x = 42
    def f():
        x = 666
        def g():
            print "x is", x
        g()
    f()

Under 2.0, it prints:

    x is 42

Nested scopes (PEP 227) are being introduced in 2.1. But under 2.1, it still prints:

    x is 42

and also generates a warning.

In 2.2, and also in 2.1 if from __future__ import nested_scopes is included at the top of scope.py, it prints:

    x is 666

Standard Module __future__.py

Lib/__future__.py is a real module, and serves three purposes:

1. To avoid confusing existing tools that analyze import statements and expect to find the modules they're importing.
2. To ensure that future_statements run under releases prior to 2.1 at least yield runtime exceptions (the import of __future__ will fail, because there was no module of that name prior to 2.1).
3. To document when incompatible changes were introduced, and when they will be -- or were -- made mandatory. This is a form of executable documentation, and can be inspected programmatically via importing __future__ and examining its contents.

Each statement in __future__.py is of the form:

    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ")"

where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples of the same form as sys.version_info:

    (PY_MAJOR_VERSION,   # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION,   # the 1; an int
     PY_MICRO_VERSION,   # the 0; an int
     PY_RELEASE_LEVEL,   # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL   # the 3; an int
    )

OptionalRelease records the first release in which:

    from __future__ import FeatureName

was accepted.

In the case of MandatoryReleases that have not yet occurred, MandatoryRelease predicts the release in which the feature will become part of the language. Else MandatoryRelease records when the feature became part of the language; in releases at or after that, modules no longer need:

    from __future__ import FeatureName

to use the feature in question, but may continue to use such imports.

MandatoryRelease may also be None, meaning that a planned feature got dropped.

Instances of class _Feature have two corresponding methods, .getOptionalRelease() and .getMandatoryRelease().

No feature line will ever be deleted from __future__.py. Example line:

    nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "final", 0))

This means that:

    from __future__ import nested_scopes

will work in all releases at or after 2.1b1, and that nested_scopes is intended to be enforced starting in release 2.2.

Resolved Problem: Runtime Compilation

Several Python features can compile code during a module's runtime:

1. The exec statement.
2. The execfile() function.
3. The compile() function.
4. The eval() function.
5. The input() function.

Since a module M containing a future_statement naming feature F explicitly requests that the current release act like a future release with respect to F, any code compiled dynamically from text passed to one of these from within M should probably also use the new syntax or semantics associated with F.
The 2.1 release does behave this way.

This isn't always desired, though. For example, doctest.testmod(M) compiles examples taken from strings in M, and those examples should use M's choices, not necessarily the doctest module's choices. In the 2.1 release, this isn't possible, and no scheme has yet been suggested for working around this. NOTE: PEP 264 later addressed this in a flexible way, by adding optional arguments to compile().

In any case, a future_statement appearing "near the top" (see Syntax above) of text compiled dynamically by an exec, execfile() or compile() applies to the code block generated, but has no further effect on the module that executes such an exec, execfile() or compile(). This can't be used to affect eval() or input(), however, because they only allow expression input, and a future_statement is not an expression.

Resolved Problem: Native Interactive Shells

There are two ways to get an interactive shell:

1. By invoking Python from a command line without a script argument.
2. By invoking Python from a command line with the -i switch and with a script argument.

An interactive shell can be seen as an extreme case of runtime compilation (see above): in effect, each statement typed at an interactive shell prompt runs a new instance of exec, compile() or execfile(). A future_statement typed at an interactive shell applies to the rest of the shell session's life, as if the future_statement had appeared at the top of a module.

Resolved Problem: Simulated Interactive Shells

Interactive shells "built by hand" (by tools such as IDLE and the Emacs Python-mode) should behave like native interactive shells (see above). However, the machinery used internally by native interactive shells has not been exposed, and there isn't a clear way for tools building their own interactive shells to achieve the desired behavior. NOTE: PEP 264 later addressed this, by adding intelligence to the standard codeop.py. Simulated shells that don't use the standard library shell helpers can get a similar effect by exploiting the new optional arguments to compile() added by PEP 264.

Questions and Answers

What about a "from __past__" version, to get back old behavior?

Outside the scope of this PEP. Seems unlikely to the author, though. Write a PEP if you want to pursue it.

What about incompatibilities due to changes in the Python virtual machine?

Outside the scope of this PEP, although PEP 5 suggests a grace period there too, and the future_statement may also have a role to play there.

What about incompatibilities due to changes in Python's C API?

Outside the scope of this PEP.

I want to wrap future_statements in try/except blocks, so I can use different code depending on which version of Python I'm running. Why can't I?

Sorry! try/except is a runtime feature; future_statements are primarily compile-time gimmicks, and your try/except happens long after the compiler is done. That is, by the time you do try/except, the semantics in effect for the module are already a done deal. Since the try/except wouldn't accomplish what it looks like it should accomplish, it's simply not allowed. We also want to keep these special statements very easy to find and to recognize.

Note that you can import __future__ directly, and use the information in it, along with sys.version_info, to figure out where the release you're running under stands in relation to a given feature's status.

Going back to the nested_scopes example, what if release 2.2 comes along and I still haven't changed my code? How can I keep the 2.1 behavior then?
By continuing to use 2.1, and not moving to 2.2 until you do change your code. The purpose of future_statement is to make life easier for people who keep current with the latest release in a timely fashion. We don't hate you if you don't, but your problems are much harder to solve, and somebody with those problems will need to write a PEP addressing them. future_statement is aimed at a different audience.

Overloading import sucks. Why not introduce a new statement for this? Like maybe lambda lambda nested_scopes?

That is, unless we introduce a new keyword, we can't introduce an entirely new statement. But if we introduce a new keyword, that in itself would break old code. That would be too ironic to bear. Yes, overloading import does suck, but not as energetically as the alternatives -- as is, future_statements are 100% backward compatible.

Copyright

This document has been placed in the public domain.

References and Footnotes

[1] Note that this is may and not will: better safe than sorry. Of course spurious warnings won't be generated when avoidable with reasonable cost.

[2] This ensures that a future_statement run under a release prior to the first one in which a given feature is known (but >= 2.1) will raise a compile-time error rather than silently do a wrong thing. If transported to a release prior to 2.1, a runtime error will be raised because of the failure to import __future__ (no such module existed in the standard distribution before the 2.1 release, and the double underscores make it a reserved name).
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0236/", "authors": [ "Tim Peters" ], "pep_number": "0236", "pandoc_version": "3.5" }
PEP: 540 Title: Add a new UTF-8 Mode Version: $Revision$ Last-Modified: $Date$ Author: Victor Stinner <[email protected]> BDFL-Delegate: INADA Naoki Status: Final Type: Standards Track Content-Type: text/x-rst Created: 05-Jan-2016 Python-Version: 3.7 Resolution: https://mail.python.org/pipermail/python-dev/2017-December/151173.html Abstract Add a new "UTF-8 Mode" to enhance Python's use of UTF-8. When UTF-8 Mode is active, Python will: - use the utf-8 encoding, regardless of the locale currently set by the current platform, and - change the stdin and stdout error handlers to surrogateescape. This mode is off by default, but is automatically activated when using the "POSIX" locale. Add the -X utf8 command line option and PYTHONUTF8 environment variable to control UTF-8 Mode. Rationale Locale encoding and UTF-8 Python 3.6 uses the locale encoding for filenames, environment variables, standard streams, etc. The locale encoding is inherited from the locale; the encoding and the locale are tightly coupled. Many users inherit the ASCII encoding from the POSIX locale, aka the "C" locale, but are unable to change the locale for various reasons. This encoding is very limited in terms of Unicode support: any non-ASCII character is likely to cause trouble. It isn't always easy to get an accurate locale. Locales don't get exactly the same names on different Linux distributions, FreeBSD, macOS, etc. And some locales, like the recent C.UTF-8 locale, are only supported by a few platforms. The current locale can even vary on the same platform depending on context; for example, an SSH connection can use a different encoding than the filesystem or local terminal encoding on the same machine. On the flip side, Python 3.6 is already using UTF-8 by default on macOS, Android and Windows (PEP 529) for most functions -- although open() is a notable exception here. UTF-8 is also the default encoding of Python scripts, XML and JSON file formats. The Go programming language uses UTF-8 for all strings. UTF-8 support is nearly ubiquitous for data read and written by modern platforms. It also has excellent support in Python. The problem is simply that the locale is frequently misconfigured. An obvious solution suggests itself: ignore the locale encoding and use UTF-8. Passthrough for undecodable bytes: surrogateescape When decoding bytes from UTF-8 using the default strict error handler, Python 3 raises a UnicodeDecodeError on the first undecodable byte. Unix command line tools like cat or grep and most Python 2 applications simply do not have this class of bugs: they don't decode data, but process data as a raw bytes sequence. Python 3 already has a solution to behave like Unix tools and Python 2: the surrogateescape error handler (PEP 383). It allows processing data as if it were bytes, but uses Unicode in practice; undecodable bytes are stored as surrogate characters. UTF-8 Mode sets the surrogateescape error handler for stdin and stdout, since these streams are commonly associated with Unix command line tools. However, users have a different expectation on files. Files are expected to be properly encoded, and Python is expected to fail early when open() is called with the wrong options, like opening a JPEG picture in text mode. The open() default error handler remains strict for these reasons. No change by default for best backward compatibility While UTF-8 is perfect in most cases, sometimes the locale encoding is actually the best encoding.
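Here is a minimal sketch (an illustration, not taken from the PEP) of the surrogateescape round-trip described above:

    data = b'abc\xff'            # 0xff can never appear in valid UTF-8

    try:
        data.decode('utf-8')     # the default strict handler fails early
    except UnicodeDecodeError as exc:
        print(exc)

    # With surrogateescape, the undecodable byte is smuggled through as a
    # lone surrogate character and restored unchanged on encoding:
    text = data.decode('utf-8', 'surrogateescape')
    assert text == 'abc\udcff'
    assert text.encode('utf-8', 'surrogateescape') == data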
This PEP changes the behaviour for the POSIX locale, since this locale is usually equivalent to the ASCII encoding, whereas UTF-8 is a much better choice. It does not change the behaviour for other locales, to prevent any risk of regression. Since users must explicitly enable the new UTF-8 Mode for these other locales, they are responsible for any potential mojibake issues it causes. Proposal Add a new UTF-8 Mode to use the UTF-8 encoding, ignore the locale encoding, and change stdin and stdout error handlers to surrogateescape. Add the new -X utf8 command line option and PYTHONUTF8 environment variable. Users can explicitly activate UTF-8 Mode with the command-line option -X utf8 or by setting the environment variable PYTHONUTF8=1. This mode is disabled by default and enabled by the POSIX locale. Users can explicitly disable UTF-8 Mode with the command-line option -X utf8=0 or by setting the environment variable PYTHONUTF8=0. For standard streams, the PYTHONIOENCODING environment variable has priority over UTF-8 Mode. On Windows, the PYTHONLEGACYWINDOWSFSENCODING environment variable (PEP 529) has priority over UTF-8 Mode. Effects of UTF-8 Mode: - sys.getfilesystemencoding() returns 'UTF-8'. - locale.getpreferredencoding() returns UTF-8; its do_setlocale argument, and the locale encoding, are ignored. - The sys.stdin and sys.stdout error handlers are set to surrogateescape. Side effects: - open() uses the UTF-8 encoding by default. However, it still uses the strict error handler by default. - os.fsdecode() and os.fsencode() use the UTF-8 encoding. - Command line arguments, environment variables and filenames use the UTF-8 encoding. Relationship with the locale coercion (PEP 538) The POSIX locale enables both locale coercion (PEP 538) and UTF-8 Mode (PEP 540). When locale coercion is enabled, enabling UTF-8 Mode has no additional effect. UTF-8 Mode has the same effect as locale coercion: - sys.getfilesystemencoding() returns 'UTF-8', - locale.getpreferredencoding() returns UTF-8, and - the sys.stdin and sys.stdout error handlers are set to surrogateescape. These changes only affect Python code. But locale coercion has additional effects: the LC_CTYPE environment variable and the LC_CTYPE locale are set to a UTF-8 locale like C.UTF-8. One side effect is that non-Python code is also impacted by locale coercion. The two PEPs are complementary. On platforms like CentOS 7, where locale coercion is not supported, the POSIX locale only enables UTF-8 Mode. In this case, Python code uses the UTF-8 encoding and ignores the locale encoding, whereas non-Python code uses the locale encoding, which is usually ASCII for the POSIX locale. While UTF-8 Mode is supported on all platforms and can be enabled with any locale, locale coercion is not supported by all platforms and is restricted to the POSIX locale. UTF-8 Mode only has an impact on Python child processes when the PYTHONUTF8 environment variable is set to 1, whereas locale coercion sets the LC_CTYPE environment variable, which impacts all child processes. The benefit of the locale coercion approach is that it helps ensure that encoding handling in binary extension modules and child processes is consistent with Python's encoding handling. The upside of the UTF-8 Mode approach is that it allows an embedding application to change the interpreter's behaviour without having to change the process-global locale settings.
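The effects listed above can be observed from within Python 3.7+. A minimal sketch (illustrative only; the script name is arbitrary, and the exact capitalization of the reported encoding names can vary between Python versions):

    import locale
    import sys

    # Run with:  python3 -X utf8 demo.py   (or: PYTHONUTF8=1 python3 demo.py)
    print(sys.flags.utf8_mode)             # 1 while UTF-8 Mode is active
    print(sys.getfilesystemencoding())     # 'utf-8'
    print(locale.getpreferredencoding())   # 'UTF-8'
    print(sys.stdin.errors)                # 'surrogateescape'
    print(sys.stdout.errors)               # 'surrogateescape'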
Backward Compatibility The only backward-incompatible change is that the POSIX locale now enables UTF-8 Mode by default: it will now use the UTF-8 encoding, ignore the locale encoding, and change the stdin and stdout error handlers to surrogateescape. Annex: Encodings And Error Handlers UTF-8 Mode changes the default encoding and error handler used by open(), os.fsdecode(), os.fsencode(), sys.stdin, sys.stdout and sys.stderr. Encoding and error handler:

    Function                        Default                   UTF-8 Mode or POSIX locale
    ------------------------------  ------------------------  ----------------------------
    open()                          locale/strict             UTF-8/strict
    os.fsdecode(), os.fsencode()    locale/surrogateescape    UTF-8/surrogateescape
    sys.stdin, sys.stdout           locale/strict             UTF-8/surrogateescape
    sys.stderr                      locale/backslashreplace   UTF-8/backslashreplace

By comparison, Python 3.6 uses:

    Function                        Default                   POSIX locale
    ------------------------------  ------------------------  ----------------------------
    open()                          locale/strict             locale/strict
    os.fsdecode(), os.fsencode()    locale/surrogateescape    locale/surrogateescape
    sys.stdin, sys.stdout           locale/strict             locale/**surrogateescape**
    sys.stderr                      locale/backslashreplace   locale/backslashreplace

Encoding and error handler on Windows On Windows, the encodings and error handlers are different:

    Function                        Default                  Legacy Windows FS encoding   UTF-8 Mode
    ------------------------------  -----------------------  ---------------------------  ------------------------
    open()                          mbcs/strict              mbcs/strict                  UTF-8/strict
    os.fsdecode(), os.fsencode()    UTF-8/surrogatepass      mbcs/replace                 UTF-8/surrogatepass
    sys.stdin, sys.stdout           UTF-8/surrogateescape    UTF-8/surrogateescape        UTF-8/surrogateescape
    sys.stderr                      UTF-8/backslashreplace   UTF-8/backslashreplace       UTF-8/backslashreplace

By comparison, Python 3.6 uses:

    Function                        Default                  Legacy Windows FS encoding
    ------------------------------  -----------------------  ---------------------------
    open()                          mbcs/strict              mbcs/strict
    os.fsdecode(), os.fsencode()    UTF-8/surrogatepass      mbcs/replace
    sys.stdin, sys.stdout           UTF-8/surrogateescape    UTF-8/surrogateescape
    sys.stderr                      UTF-8/backslashreplace   UTF-8/backslashreplace

The "Legacy Windows FS encoding" is enabled by the PYTHONLEGACYWINDOWSFSENCODING environment variable. If stdin and/or stdout is redirected to a pipe, sys.stdin and/or sys.stdout use the mbcs encoding by default rather than UTF-8. But in UTF-8 Mode, sys.stdin and sys.stdout always use the UTF-8 encoding. Note: There is no POSIX locale on Windows. The ANSI code page is used as the locale encoding, and this code page never uses the ASCII encoding. Links - bpo-29240: Implementation of the PEP 540: Add a new UTF-8 Mode - PEP 538: "Coercing the legacy C locale to C.UTF-8" - PEP 529: "Change Windows filesystem encoding to UTF-8" - PEP 528: "Change Windows console encoding to UTF-8" - PEP 383: "Non-decodable Bytes in System Character Interfaces" Post History - 2017-12: [Python-Dev] PEP 540: Add a new UTF-8 Mode - 2017-04: [Python-Dev] Proposed BDFL Delegate update for PEPs 538 & 540 (assuming UTF-8 for *nix system boundaries) - 2017-01: [Python-ideas] PEP 540: Add a new UTF-8 Mode - 2017-01: bpo-28180: Implementation of the PEP 538: coerce C locale to C.utf-8 (msg284764) - 2016-08-17: bpo-27781: Change sys.getfilesystemencoding() on Windows to UTF-8 (msg272916) -- Victor proposed -X utf8 for the PEP 529 (Change Windows filesystem encoding to UTF-8) Version History - Version 4: locale.getpreferredencoding() now returns 'UTF-8' in the UTF-8 Mode.
- Version 3: The UTF-8 Mode does not change the open() default error handler (strict) anymore, and the Strict UTF-8 Mode has been removed. - Version 2: Rewrite the PEP from scratch to make it much shorter and easier to understand. - Version 1: First version posted to python-dev. Copyright This document has been placed in the public domain.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0540/", "authors": [ "Victor Stinner" ], "pep_number": "0540", "pandoc_version": "3.5" }
PEP: 595 Title: Improving bugs.python.org Author: Ezio Melotti <[email protected]>, Berker Peksag <[email protected]> BDFL-Delegate: Barry Warsaw <[email protected]> Status: Withdrawn Type: Informational Content-Type: text/x-rst Created: 12-May-2019 Abstract This PEP proposes a list of improvements to make bugs.python.org more usable for contributors and core developers. This PEP also discusses why remaining on Roundup should be preferred over switching to GitHub Issues, as proposed by PEP 581. Resolution 2020-06-25: With the acceptance of PEP 581, the move to GitHub for issues is proceeding, and this PEP is being marked as a withdrawn informational PEP. Motivation On May 14th, 2019, PEP 581 was accepted without much public discussion and without a clear consensus. The PEP contains factual errors and doesn't address some of the issues that the migration to GitHub Issues might present. Given the scope of the migration, the amount of work required, and how it will negatively affect the workflow during the transition phase, this decision should be re-evaluated. Roundup advantages over GitHub Issues This section discusses reasons why Roundup should be preferred over GitHub Issues and Roundup features that are not available on GitHub Issues. - Roundup is the status quo. Roundup has been an integral part of the CPython workflow for years. It is a stable product that has been tested and customized to adapt to our needs as the workflow evolved. It is possible to gradually improve it and avoid the disruption that a switch to a different system would inevitably bring to the workflow. - Open-source and Python powered. Roundup is an open-source project and is written in Python. By using it and supporting it, we also support the Python ecosystem. Several features developed for bpo have also been ported to upstream Roundup over the years. - Fully customizable. Roundup can be (and has been) fully customized to fit our needs. - Finer-grained access control. Roundup allows the creation of different roles with different permissions (e.g. create, view, edit, etc.) for each individual property, and users can have multiple roles. - Flexible UI. While the Roundup UI might look dated, it is convenient and flexible. For example, on the issue page, each field (e.g. title, type, versions, status, linked files and PRs, etc.) has appropriate UI elements (input boxes, dropdowns, tables, etc.) that are easy to set and also provide a convenient way to get info about the issue at a glance. The number of fields, their values, and the UI elements they use are also fully customizable. GitHub only provides labels. The issue list page presents the issues in a compact and easy to read table with separate columns for different fields. For comparison, Roundup lists 50 issues on one screen, whereas GitHub takes two screens to show 25 issues. - Advanced search. Roundup provides an accurate way to search and filter by using any combination of issue fields. It is also possible to customize the number of results and the fields displayed in the table, and the sorting and grouping (up to two levels). bpo also provides predefined summaries (e.g. "Created by you", "Assigned to you", etc.) and allows the creation of custom search queries that can be conveniently accessed from the sidebar. - Nosy list autocomplete. The nosy list has an autocomplete feature that suggests maintainers and experts. The suggestions are automatically updated when the experts index changes. - Dependencies and Superseders.
Roundup allows specifying dependencies that must be addressed before the current issue can be closed, and a superseder issue to easily mark duplicates (for example, bpo-12078). The list of dependencies can also be used to create meta-issues that reference several other sub-issues (for example, bpo-26865). Improving Roundup This section lists some of the issues mentioned by PEP 581 and other desired features and discusses how they can be implemented by improving Roundup and/or our instance. - REST API support. A REST API will make integration with other services and the development of new tools and applications easier. Upstream Roundup now supports a REST API. Updating the tracker will make the REST API available. - GitHub login support. This will allow users to log in to bugs.python.org (bpo) without having to create a new account. It will also solve issues with confirmation emails being marked as spam, and provide two-factor authentication. A patch to add this functionality is already available and is being integrated at the time of writing. - Markdown support and message preview and editing. This feature will allow the use of Markdown in messages and the ability to preview the message before submission and edit it afterward. This can be done, but it will take some work. Possible solutions have been proposed on the roundup-devel mailing list. - "Remove me from nosy list" button. Add a button on issue pages to remove self from the nosy list. This feature will be added during GSoC 2019. - Mobile friendly theme. The current theme of bugs.python.org looks dated, and it doesn't work well with mobile browsers. A mobile-friendly theme that is more modern but still familiar will be added. - Move reply box close to the last message. The reply box is located at the top of the page, whereas the last message is at the bottom. The reply box can be moved or duplicated after the last message. - Real-time updates. When another user submits changes to an issue, they should show up in real time. This can be accomplished by using the REST API. - Add PR link to BPO emails. Currently bpo emails don't include links to the corresponding PRs. A patch is available to change the content of the bpo emails from: components: +Tkinter versions: +Python 3.4 pull_requests: +42 to: components: +Tkinter versions: +Python 3.4 pull_request: https://github.com/python/cpython/pull/341 - Python 3 support. Using Python 3 will make maintenance easier. Upstream Roundup now supports Python 3. Updating the tracker will allow us to switch to Python 3. The instances will need to be updated as well. - Use upstream Roundup. We currently use a fork of Roundup with a few modifications, most notably the GitHub integration. If this is ported upstream, we can start using upstream Roundup without having to maintain our fork. PEP 581 issues This section addresses some errors and inaccuracies found in PEP 581. The "Why GitHub?" section of PEP 581 lists features currently available on GitHub Issues but not on Roundup. Some of these features are currently supported: - "Ability to reply to issue and pull request conversations via email." - Being able to reply by email has been one of the core features of Roundup since the beginning. It is also possible to create new issues or close existing ones, set or modify fields, and add attachments. - "Email notifications containing metadata, integrated with Gmail, allowing systematic filtering of emails." - Emails sent by Roundup contain metadata that can be used for filtering.
- "Additional privacy, such as offering the user a choice to hide an email address, while still allowing communication with the user through @-mentions." - Email addresses are hidden by default to users that are not registered. Registered users can see other users' addresses because we configured the tracker to show them. It can easily be changed if desired. Users can still be added to the nosy list by using their username even if their address is hidden. - "Ability to automatically close issues when a PR has been merged." - The GitHub integration of Roundup automatically closes issues when a commit that contains "fixes issue <id>" is merged. (Alternative spellings such as "closes" or "bug" are also supported.) See this message for a recent example of this feature. - "Support for permalinks, allowing easy quoting and copying & pasting of source code." - Roundup has permalinks for issues, messages, attachments, etc. In addition, Roundup allows to easily rewrite broken URLs in messages (e.g. if the code hosting changes). - "Core developers, volunteers, and the PSF don't have to maintain the issue infrastructure/site, giving us more time and resources to focus on the development of Python." - While this is partially true, additional resources are required to write and maintain bots. In some cases, bots are required to workaround GitHub's lack of features rather than expanding. This webhook was written specifically to workaround GitHub's email integration. Updating our bots to stay up-to-date with changes in the GitHub API has also maintenance cost. This recent incident caused by GitHub took two days to be fixed. In addition, we will still need to maintain Roundup for bpo (even if it becomes read-only) and for the other trackers we currently host/maintain (Jython and Roundup). The "Issues with Roundup / bpo" section of PEP 581 lists some issues that have already been fixed: - "The upstream Roundup code is in Mercurial. Without any CI available, it puts heavy burden on the few existing maintainers in terms of reviewing, testing, and applying patches." - While Roundup uses Mercurial by default, there is a git clone available on GitHub. Roundup also has CI available on Travis CI and Codecov. - "There is no REST API available. There is an open issue in Roundup for adding REST API. Last activity was in 2016." - The REST API has been integrated and it's now available in Roundup. - "Users email addresses are exposed. There is no option to mask it." - Exposing addresses to registered and logged in users was a decision taken when our instance was set up. This has now been changed to make the email addresses hidden for regular users too (Developers and Coordinators can still see them). The "Email address" column from the user listing page has been removed too. - "It sends a number of unnecessary emails and notifications, and it is difficult, if not impossible, to configure." - This can be configured. - "Creating an account has been a hassle. There have been reports of people having trouble creating accounts or logging in." - The main issue is confirmation emails being marked as spam. Work has been done to resolve the issue. Migration considerations This section describes issues with the migrations that might not have been addressed by PEP 581 and PEP 588. PEP 588 suggests to add a button to migrate issues to GitHub only when someone wants to keep working on them. 
This approach has several issues, but there are also other issues that will need to be addressed regardless of the approach used: - Vendor lock-in. GitHub is proprietary and there is a risk of vendor lock-in. Their business model might change and they could shut down altogether. For example, several projects decided to move away from GitHub after the Microsoft acquisition. If/when the repository is no longer available on GitHub, we will be forced to migrate again and all the links to the issues won't work anymore. - Required bpo updates. bpo will need to be updated in order to add a button that, once pressed, creates a new issue on GitHub, copies over all the messages and attachments, and creates/adds labels for the existing fields. Permissions will also need to be tweaked to make individual issues read-only once they are migrated, and to prevent users from creating new accounts. It might be necessary to set up redirects (see below). - Two trackers. If issues are migrated on demand, the issues will be split between two trackers. Referencing and searching issues will take significantly more effort. - Lossy conversion. GitHub's only mechanism to add custom metadata is through labels. bpo uses a number of fields to specify several different kinds of metadata. Preserving all fields and values will result in too many labels. If only some fields and values are preserved, the others will be lost (unless there is a way to preserve them elsewhere). - Issue IDs preservation. GitHub doesn't provide a way to set and preserve the ID of migrated issues. Some projects managed to preserve the IDs by contacting the GitHub staff and migrating the issues en masse. However, this is no longer possible, since PRs and issues share the same namespace and PRs already use existing bpo issue IDs. - Internal issue links preservation. Existing issues might contain references to other issues in messages and fields (e.g. dependencies or superseder). Since the issue ID will change during the migration, these will need to be updated. If the issues are migrated on demand, all the existing internal references to the migrated issues (on both bpo and GitHub issues) will have to be updated. Setting up a redirect for each migrated issue on bpo might mitigate the issue; however, if references in migrated messages are not updated, it will cause confusion (e.g. if bpo issue #1234 becomes GitHub issue #4321, a reference to #1234 in a migrated message could link to bpo #1234 and bpo can redirect to GitHub issue #4321, but new references to #1234 will link to GitHub PR #1234 rather than GitHub issue #4321). Manually having to specify a bpo- or gh- prefix is error-prone. - External issue links preservation. A number of websites, mails, etc. link to bpo issues. If bpo is shut down, these links will break. If we don't want to break the links, we will have to keep bpo alive and set up a redirect system that links to the corresponding GitHub issue. In addition, if GitHub shuts down, we won't have any way to set up redirects and preserve external links to GitHub issues. - References preservation and updating. In addition to issue references, bpo converts a number of other references into links, including message and PR IDs, changeset numbers, legacy SVN revision numbers, paths to files in the repo, files in tracebacks (detecting the correct branch), and links to devguide pages and sections. Since Roundup converts references to links when messages are requested, it is possible to update the target and generate the correct link.
This need has already arisen several times, for example: files and HG changesets moved from hg.python.org to GitHub, and the devguide moved from docs.python.org/devguide to devguide.python.org. Since messages on GitHub are static, the links will need to be generated and hardcoded during the migration or they will be lost. In order to update them, a tool to find all references and regenerate the links will need to be written. - Roundup and bpo maintenance. On top of the aforementioned changes to bpo and development of tools required to migrate to GitHub issues, we will still need to keep running and maintaining Roundup, both for our bpo instance (read-only) and for the Jython and Roundup trackers (read-write). Even if eventually we migrate all bpo issues to GitHub and we stop maintaining Jython and Roundup, bpo will need to be maintained and redirect to the corresponding GitHub issues. - Bots maintenance. Since it's not possible to customize GitHub directly, it's also necessary to write, maintain, and host bots. Even if eventually we stop maintaining Roundup, the maintenance burden is simply shifted from Roundup to the bots. Hosting each different bot also has a monetary cost. - Using issue templates. Manually editing issue templates to "remove texts that don't apply to [the] issue" is cumbersome and error-prone. - Signal to noise ratio. Switching to GitHub Issues will likely increase the number of invalid reports and increase the triaging effort. This concern has been raised in the past in a Zulip topic. There have already been cases where people posted comments on PRs that required moderators to mark them as off-topic or disruptive, delete them altogether, and even lock the conversation (for example, this PR). - Weekly tracker reports and stats. Roundup sends weekly reports to python-dev with a summary that includes new issues, recent issues with no replies, recent issues waiting for review, most discussed issues, closed issues, and deltas for open/closed/total issue counts (for example, see this summary). The report provides an easy way to keep track of the tracker activity and to make sure that issues that require attention are noticed. The data collected by the weekly report is also used to generate statistics and graphs that can be used to gain new insights. - bpo-related MLs. There are currently two mailing lists where bpo posts new tracker issues and all messages respectively: new-bugs-announce and python-bugs-list. A new system will need to be developed to preserve this functionality. These MLs offer additional ways to keep track of the tracker activity. Copyright This document has been placed in the public domain.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0595/", "authors": [ "Berker Peksag", "Ezio Melotti" ], "pep_number": "0595", "pandoc_version": "3.5" }
PEP: 576 Title: Rationalize Built-in function classes Author: Mark Shannon <[email protected]> BDFL-Delegate: Petr Viktorin Status: Withdrawn Type: Standards Track Content-Type: text/x-rst Created: 10-May-2018 Python-Version: 3.8 Post-History: 17-May-2018, 23-Jun-2018, 08-Jul-2018, 29-Mar-2019 Abstract Expose the "FastcallKeywords" convention used internally by CPython to third-party code, and make the inspect module use duck-typing. In combination this will allow third-party C extensions and tools like Cython to create objects that use the same calling conventions as built-in and Python functions, thus gaining performance parity with built-in functions like len or print. A small improvement in the performance of existing code is expected. Motivation Currently third-party module authors face a dilemma when implementing functions in C. Either they can use one of the pre-existing built-in function or method classes or implement their own custom class in C. The first choice causes them to lose the ability to access the internals of the callable object. The second choice is an additional maintenance burden and, more importantly, has a significant negative impact on performance. This PEP aims to allow authors of third-party C modules, and tools like Cython, to utilize the faster calling convention used internally by CPython for built-in functions and methods, and to do so without a loss of capabilities relative to a function implemented in Python. Introspection The inspect module will fully support duck-typing when introspecting callables. The inspect.Signature.from_callable() function computes the signature of a callable. If an object has a __signature__ property, then inspect.Signature.from_callable() simply returns that. To further support duck-typing, if a callable has a __text_signature__ then the __signature__ will be created from that. This means that third-party built-in functions can implement __text_signature__ if sufficient, and the more expensive __signature__ if necessary. Efficient calls to third-party callables Currently the majority of calls are dispatched to functions and method_descriptors in custom code, using the "FastcallKeywords" internal calling convention. This PEP proposes that this calling convention be implemented via a C function pointer. Third-party callables which implement this binary interface will have the potential to be called as fast as a built-in function. Continued prohibition of callable classes as base classes Currently any attempt to use function, method or method_descriptor as a base class for a new class will fail with a TypeError. This behaviour is desirable as it prevents errors when a subclass overrides the __call__ method. If callables could be sub-classed then any call to a function or a method_descriptor would need an additional check that the __call__ method had not been overridden. By exposing an additional call mechanism, the potential for errors becomes greater. As a consequence, any third-party class implementing the additional call interface will not be usable as a base class. New classes and changes to existing classes Python visible changes 1. A new built-in class, builtin_function, will be added. 2. types.BuiltinFunctionType will refer to builtin_function not builtin_function_or_method. 3. Instances of the builtin_function class will retain the __module__ property of builtin_function_or_method and gain the func_module and func_globals properties. The func_module allows access to the module to which the function belongs.
Note that this is different from the __module__ property which merely returns the name of the module. The func_globals property is equivalent to func_module.__dict__ and is provided to mimic the Python function property of the same name. 4. When binding a method_descriptor instance to an instance of its owning class, a bound_method will be created instead of a builtin_function_or_method. This means that the method_descriptors now mimic the behaviour of Python functions more closely. In other words, [].append becomes a bound_method instead of a builtin_function_or_method. C API changes 1. A new function PyBuiltinFunction_New(PyMethodDef *ml, PyObject *module) is added to create built-in functions. 2. PyCFunction_NewEx() and PyCFunction_New() are deprecated and will return a PyBuiltinFunction if able, otherwise a builtin_function_or_method. Retaining backwards compatibility in the C API and ABI The proposed changes are fully backwards and forwards compatible at both the API and ABI level. Internal C changes Two new flags will be allowed for the typeobject.tp_flags field: Py_TPFLAGS_EXTENDED_CALL and Py_TPFLAGS_FUNCTION_DESCRIPTOR. Py_TPFLAGS_EXTENDED_CALL For any built-in class that sets Py_TPFLAGS_EXTENDED_CALL, the corresponding C struct must begin with the struct PyExtendedCallable, which is defined as follows:

    typedef PyObject *(*extended_call_ptr)(PyObject *callable, PyObject** args,
                                           int positional_argcount, PyTupleObject* kwnames);

    typedef struct {
        PyObject_HEAD
        extended_call_ptr ext_call;
    } PyExtendedCallable;

Any class that sets Py_TPFLAGS_EXTENDED_CALL cannot be used as a base class, and a TypeError will be raised if any Python code tries to use it as a base class. Py_TPFLAGS_FUNCTION_DESCRIPTOR If this flag is set for a built-in class F, then instances of that class are expected to behave the same as a Python function when used as a class attribute. Specifically, this means that the value of c.m, where C.m is an instance of the built-in class F (and c is an instance of C), must be a bound method binding C.m and c. Without this flag, it would be impossible for custom callables to behave like Python functions and be as efficient as Python or built-in functions. Changes to existing C structs The function, method_descriptor and method classes will have their corresponding structs changed to start with the PyExtendedCallable struct. Third-party built-in classes using the new extended call interface To enable call performance on a par with Python functions and built-in functions, third-party callables should set the Py_TPFLAGS_EXTENDED_CALL bit of tp_flags and ensure that the corresponding C struct starts with the PyExtendedCallable struct. Any built-in class that has the Py_TPFLAGS_EXTENDED_CALL bit set must also implement the tp_call function and make sure its behaviour is consistent with the ext_call function. Performance implications of these changes Adding a function pointer to each callable, rather than each class of callable, enables the choice of dispatching function (the code to shuffle arguments about and do error checking) to be made when the callable object is created rather than when it is called. This should reduce the number of instructions executed between the call-site in the interpreter and the execution of the callee. Alternative Suggestions PEP 580 is an alternative approach to solving the same problem as this PEP.
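Returning to the introspection duck-typing described in this PEP's Introspection section, here is a minimal sketch (the class and helper names are hypothetical, purely for illustration) of a callable object that advertises its signature the way inspect.Signature.from_callable() consumes it:

    import inspect

    def _model(a, b=1):
        pass

    class FastCallable:
        # inspect.Signature.from_callable() returns __signature__ directly
        # when the attribute is present, so a callable only needs to expose
        # it (or the cheaper __text_signature__) to be introspectable.
        __signature__ = inspect.signature(_model)

        def __call__(self, a, b=1):
            return a + b

    print(inspect.signature(FastCallable()))   # prints: (a, b=1)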
Reference implementation A draft implementation can be found at https://github.com/markshannon/cpython/tree/pep-576-minimal Copyright This document has been placed in the public domain.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0576/", "authors": [ "Mark Shannon" ], "pep_number": "0576", "pandoc_version": "3.5" }
PEP: 3130 Title: Access to Current Module/Class/Function Version: $Revision$ Last-Modified: $Date$ Author: Jim J. Jewett <[email protected]> Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 22-Apr-2007 Python-Version: 3.0 Post-History: 22-Apr-2007 Rejection Notice This PEP is rejected. It is not clear how it should be implemented or what the precise semantics should be in edge cases, and there aren't enough important use cases given. Response has been lukewarm at best. Abstract It is common to need a reference to the current module, class, or function, but there is currently no entirely correct way to do this. This PEP proposes adding the keywords __module__, __class__, and __function__. Rationale for __module__ Many modules export various functions, classes, and other objects, but will perform additional activities (such as running unit tests) when run as a script. The current idiom is to test whether the module's name has been set to a magic value:

    if __name__ == "__main__": ...

More complicated introspection requires a module to (attempt to) import itself. If importing the expected name actually produces a different module, there is no good workaround.

    # __import__ lets you use a variable, but... it gets more
    # complicated if the module is in a package.
    __import__(__name__)

    # So just go to sys modules... and hope that the module wasn't
    # hidden/removed (perhaps for security), that __name__ wasn't
    # changed, and definitely hope that no other module with the
    # same name is now available.
    class X(object):
        pass

    import sys
    mod = sys.modules[__name__]
    mod = sys.modules[X.__class__.__module__]

Proposal: Add a __module__ keyword which refers to the module currently being defined (executed). (But see open issues.)

    # XXX sys.main is still changing as draft progresses.  May
    # really need sys.modules[sys.main]
    if __module__ is sys.main:    # assumes PEP (3122), Cannon
        ...

Rationale for __class__ Class methods are passed the current instance; from this they can determine self.__class__ (or cls, for class methods). Unfortunately, this reference is to the object's actual class, which may be a subclass of the defining class. The current workaround is to repeat the name of the class, and assume that the name will not be rebound.

    class C(B):
        def meth(self):
            super(C, self).meth()  # Hope C is never rebound.

    class D(C):
        def meth(self):
            # ?!?  issubclass(D, C), so it "works":
            super(C, self).meth()

Proposal: Add a __class__ keyword which refers to the class currently being defined (executed). (But see open issues.)

    class C(B):
        def meth(self):
            super(__class__, self).meth()

Note that super calls may be further simplified by the "New Super" PEP (Spealman). The __class__ (or __this_class__) attribute came up in attempts to simplify the explanation and/or implementation of that PEP, but was separated out as an independent decision. Note that __class__ (or __this_class__) is not quite the same as the __thisclass__ property on bound super objects. The existing super.__thisclass__ property refers to the class from which the Method Resolution Order search begins. In the above class D, it would refer to (the current reference of name) C. Rationale for __function__ Functions (including methods) often want access to themselves, usually for a private storage location or true recursion. While there are several workarounds, all have their drawbacks.
    def counter(_total=[0]):
        # _total shouldn't really appear in the
        # signature at all; the list wrapping and
        # [0] unwrapping obscure the code
        _total[0] += 1
        return _total[0]

    @annotate(total=0)
    def counter():
        # Assume name counter is never rebound:
        counter.total += 1
        return counter.total

    # class exists only to provide storage:
    class _wrap(object):
        __total = 0
        def f(self):
            self.__total += 1
            return self.__total

    # set module attribute to a bound method:
    accum = _wrap().f

    # This function calls "factorial", which should be itself --
    # but the same programming styles that use heavy recursion
    # often have a greater willingness to rebind function names.
    def factorial(n):
        return (n * factorial(n-1) if n else 1)

Proposal: Add a __function__ keyword which refers to the function (or method) currently being defined (executed). (But see open issues.)

    @annotate(total=0)
    def counter():
        # Always refers to this function obj:
        __function__.total += 1
        return __function__.total

    def factorial(n):
        return (n * __function__(n-1) if n else 1)

Backwards Compatibility While a user could be using these names already, double-underscore names ( __anything__ ) are explicitly reserved to the interpreter. It is therefore acceptable to introduce special meaning to these names within a single feature release. Implementation Ideally, these names would be keywords treated specially by the bytecode compiler. Guido has suggested[1] using a cell variable filled in by the metaclass. Michele Simionato has provided a prototype using bytecode hacks[2]. This does not require any new bytecode operators; it just modifies which specific sequence of existing operators gets run. Open Issues - Are __module__, __class__, and __function__ the right names? In particular, should the names include the word "this", either as __this_module__, __this_class__, and __this_function__, (format discussed on the python-3000 and python-ideas lists) or as __thismodule__, __thisclass__, and __thisfunction__ (inspired by, but conflicting with, current usage of super.__thisclass__). - Are all three keywords needed, or should this enhancement be limited to a subset of the objects? Should methods be treated separately from other functions? References Copyright This document has been placed in the public domain. [1] Fixing super anyone? Guido van Rossum https://mail.python.org/pipermail/python-3000/2007-April/006671.html [2] Descriptor/Decorator challenge, Michele Simionato http://groups.google.com/group/comp.lang.python/browse_frm/thread/a6010c7494871bb1/62a2da68961caeb6?lnk=gst&q=simionato+challenge&rnum=1&hl=en#62a2da68961caeb6
{ "license": "Public Domain", "url": "https://peps.python.org/pep-3130/", "authors": [ "Jim J. Jewett" ], "pep_number": "3130", "pandoc_version": "3.5" }
PEP: 429 Title: Python 3.4 Release Schedule Version: $Revision$ Last-Modified: $Date$ Author: Larry Hastings <[email protected]> Status: Final Type: Informational Topic: Release Content-Type: text/x-rst Created: 17-Oct-2012 Python-Version: 3.4 Abstract This document describes the development and release schedule for Python 3.4. The schedule primarily concerns itself with PEP-sized items. Release Manager and Crew - 3.4 Release Manager: Larry Hastings - Windows installers: Martin v. Löwis - Mac installers: Ned Deily - Documentation: Georg Brandl Release Schedule Python 3.4 has now reached its end-of-life and has been retired. No more releases will be made. These are all the historical releases of Python 3.4, including their release dates.

- 3.4.0 alpha 1: August 3, 2013
- 3.4.0 alpha 2: September 9, 2013
- 3.4.0 alpha 3: September 29, 2013
- 3.4.0 alpha 4: October 20, 2013
- 3.4.0 beta 1: November 24, 2013
- 3.4.0 beta 2: January 5, 2014
- 3.4.0 beta 3: January 26, 2014
- 3.4.0 candidate 1: February 10, 2014
- 3.4.0 candidate 2: February 23, 2014
- 3.4.0 candidate 3: March 9, 2014
- 3.4.0 final: March 16, 2014
- 3.4.1 candidate 1: May 5, 2014
- 3.4.1 final: May 18, 2014
- 3.4.2 candidate 1: September 22, 2014
- 3.4.2 final: October 6, 2014
- 3.4.3 candidate 1: February 8, 2015
- 3.4.3 final: February 25, 2015
- 3.4.4 candidate 1: December 6, 2015
- 3.4.4 final: December 20, 2015
- 3.4.5 candidate 1: June 12, 2016
- 3.4.5 final: June 26, 2016
- 3.4.6 candidate 1: January 2, 2017
- 3.4.6 final: January 17, 2017
- 3.4.7 candidate 1: July 25, 2017
- 3.4.7 final: August 9, 2017
- 3.4.8 candidate 1: January 23, 2018
- 3.4.8 final: February 4, 2018
- 3.4.9 candidate 1: July 19, 2018
- 3.4.9 final: August 2, 2018
- 3.4.10 candidate 1: March 4, 2019
- 3.4.10 final: March 18, 2019

Features for 3.4 Implemented / Final PEPs:

- PEP 428, a "pathlib" module providing object-oriented filesystem paths
- PEP 435, a standardized "enum" module
- PEP 436, a build enhancement that will help generate introspection information for builtins
- PEP 442, improved semantics for object finalization
- PEP 443, adding single-dispatch generic functions to the standard library
- PEP 445, a new C API for implementing custom memory allocators
- PEP 446, changing file descriptors to not be inherited by default in subprocesses
- PEP 450, a new "statistics" module
- PEP 451, standardizing module metadata for Python's module import system
- PEP 453, a bundled installer for the pip package manager
- PEP 454, a new "tracemalloc" module for tracing Python memory allocations
- PEP 456, a new hash algorithm for Python strings and binary data
- PEP 3154, a new and improved protocol for pickled objects
- PEP 3156, a new "asyncio" module, a new framework for asynchronous I/O

Deferred to post-3.4:

- PEP 431, improved support for time zone databases
- PEP 441, improved Python zip application support
- PEP 447, support for __locallookup__ metaclass method
- PEP 448, additional unpacking generalizations
- PEP 455, key transforming dictionary

Copyright This document has been placed in the public domain.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0429/", "authors": [ "Larry Hastings" ], "pep_number": "0429", "pandoc_version": "3.5" }
PEP: 336 Title: Make None Callable Version: $Revision$ Last-Modified: $Date$ Author: Andrew McClelland <[email protected]> Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 28-Oct-2004 Post-History: Abstract None should be a callable object that when called with any arguments has no side effect and returns None. BDFL Pronouncement This PEP is rejected. It is considered a feature that None raises an error when called. The proposal falls short in tests for obviousness, clarity, explicitness, and necessity. The provided Switch example is nice but easily handled by a simple lambda definition. See python-dev discussion on 17 June 2005[1]. Motivation To allow a programming style for selectable actions that is more in accordance with the minimalistic functional programming goals of the Python language. Rationale Allow the use of None in method tables as a universal no effect rather than either (1) checking a method table entry against None before calling, or (2) writing a local no effect method with arguments similar to other functions in the table. The semantics would be effectively:

    class None:
        def __call__(self, *args):
            pass

How To Use Before, checking function table entry against None:

    class Select:
        def a(self, input):
            print 'a'
        def b(self, input):
            print 'b'
        def c(self, input):
            print 'c'
        def __call__(self, input):
            function = { 1 : self.a, 2 : self.b, 3 : self.c }.get(input, None)
            if function:
                return function(input)

Before, using a local no effect method:

    class Select:
        def a(self, input):
            print 'a'
        def b(self, input):
            print 'b'
        def c(self, input):
            print 'c'
        def nop(self, input):
            pass
        def __call__(self, input):
            return { 1 : self.a, 2 : self.b, 3 : self.c }.get(input, self.nop)(input)

After:

    class Select:
        def a(self, input):
            print 'a'
        def b(self, input):
            print 'b'
        def c(self, input):
            print 'c'
        def __call__(self, input):
            return { 1 : self.a, 2 : self.b, 3 : self.c }.get(input, None)(input)

References Copyright This document has been placed in the public domain. [1] Raymond Hettinger, Propose to reject PEP 336 -- Make None Callable https://mail.python.org/pipermail/python-dev/2005-June/054280.html
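To spell out the "simple lambda definition" that the BDFL Pronouncement refers to, here is a minimal sketch (an illustration in modern Python 3 syntax, unlike the Python 2 examples above): a do-nothing lambda as the dictionary default removes any need for a callable None:

    class Select:
        def a(self, input): print('a')
        def b(self, input): print('b')
        def c(self, input): print('c')
        def __call__(self, input):
            # The lambda is called (and returns None) for unknown keys:
            return {1: self.a, 2: self.b, 3: self.c}.get(
                input, lambda input: None)(input)

    Select()(1)   # prints 'a'
    Select()(9)   # no side effect, returns None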
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0336/", "authors": [ "Andrew McClelland" ], "pep_number": "0336", "pandoc_version": "3.5" }
PEP: 216 Title: Docstring Format Author: Moshe Zadka <[email protected]> Status: Withdrawn Type: Informational Created: 31-Jul-2000 Post-History: Superseded-By: 287 It has been superseded by PEP 287. Abstract Named Python objects, such as modules, classes and functions, have a string attribute called __doc__. If the first expression inside the definition is a literal string, that string is assigned to the __doc__ attribute. The __doc__ attribute is called a documentation string, or docstring. It is often used to summarize the interface of the module, class or function. However, since there is no common format for documentation strings, tools for extracting docstrings and transforming them into documentation in a standard format (e.g., DocBook) have not sprung up in abundance, and those that do exist are for the most part unmaintained and unused. Perl Documentation In Perl, most modules are documented in a format called POD -- Plain Old Documentation. This is an easy-to-type, very low level format which integrates well with the Perl parser. Many tools exist to turn POD documentation into other formats: info, HTML and man pages, among others. However, in Perl, the information is not available at run-time. Java Documentation In Java, special comments before classes and functions serve to document the code. A program to extract these and turn them into HTML documentation, called javadoc, is part of the standard Java distribution. However, the only output format that is supported is HTML, and JavaDoc has a very intimate relationship with HTML. Python Docstring Goals Python documentation strings are easy to spot during parsing, and are also available to the runtime interpreter. This double purpose is a bit problematic, sometimes: for example, some are reluctant to have too long docstrings, because they do not want to take much space in the runtime. In addition, because of the current lack of tools, people read objects' docstrings by "print"ing them, so a tendency to make them brief and free of markup has sprung up. This tendency hinders writing better documentation-extraction tools, since it causes docstrings to contain little information, which is hard to parse. High Level Solutions To counter the objection that the strings take up place in the running program, it is suggested that documentation extraction tools will concatenate a maximum prefix of string literals which appear in the beginning of a definition. The first of these will also be available in the interactive interpreter, so it should contain a few summary lines. Docstring Format Goals These are the goals for the docstring format, as discussed ad nauseam in the doc-sig. 1. It must be easy to type with any standard text editor. 2. It must be readable to the casual observer. 3. It must not contain information which can be deduced from parsing the module. 4. It must contain sufficient information so it can be converted to any reasonable markup format. 5. It must be possible to write a module's entire documentation in docstrings, without feeling hampered by the markup language. Docstring Contents For requirement 5. above, it is needed to specify what must be in docstrings. At least the following must be available: a. A tag that means "this is a Python something, guess what" Example: In the sentence "The POP3 class", we need to markup "POP3" so. The parser will be able to guess it is a class from the contents of the poplib module, but we need to make it guess. b.
Tags that mean "this is a Python class/module/class var/instance var..." Example: The usual Python idiom for singleton class A is to have _A as the class, and A a function which returns _A objects. It's usual to document the class, nonetheless, as being A. This requires the strength to say "The class A" and have A hyperlinked and marked-up as a class. c. An easy way to include Python source code/Python interactive sessions d. Emphasis/bold e. List/tables Docstring Basic Structure The documentation strings will be in StructuredTextNG (http://www.zope.org/Members/jim/StructuredTextWiki/StructuredTextNG) Since StructuredText is not yet strong enough to handle (a) and (b) above, we will need to extend it. I suggest using [<optional description>:python identifier]. E.g.: [class:POP3], [:POP3.list], etc. If the description is missing, a guess will be made from the text. Unresolved Issues Is there a way to escape characters in ST? If so, how? (example: * at the beginning of a line without being a bullet symbol) Is my suggestion above for Python symbols compatible with ST-NG? How hard would it be to extend ST-NG to support it? How do we describe input and output types of functions? What additional constraints do we enforce on each docstring (module/class/function)? What are the guesser rules? Rejected Suggestions XML -- it's very hard to type, and too cluttered to read it comfortably.
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0216/", "authors": [ "Moshe Zadka" ], "pep_number": "0216", "pandoc_version": "3.5" }
PEP: 498 Title: Literal String Interpolation Version: $Revision$ Last-Modified: $Date$ Author: Eric V. Smith <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 01-Aug-2015 Python-Version: 3.6 Post-History: 07-Aug-2015, 30-Aug-2015, 04-Sep-2015, 19-Sep-2015, 06-Nov-2016 Resolution: https://mail.python.org/pipermail/python-dev/2015-September/141526.html Abstract Python supports multiple ways to format text strings. These include %-formatting[1], str.format()[2], and string.Template[3]. Each of these methods has its advantages, but in addition has disadvantages that make it cumbersome to use in practice. This PEP proposes to add a new string formatting mechanism: Literal String Interpolation. In this PEP, such strings will be referred to as "f-strings", taken from the leading character used to denote such strings, and standing for "formatted strings". This PEP does not propose to remove or deprecate any of the existing string formatting mechanisms. F-strings provide a way to embed expressions inside string literals, using a minimal syntax. It should be noted that an f-string is really an expression evaluated at run time, not a constant value. In Python source code, an f-string is a literal string, prefixed with 'f', which contains expressions inside braces. The expressions are replaced with their values. Some examples are: >>> import datetime >>> name = 'Fred' >>> age = 50 >>> anniversary = datetime.date(1991, 10, 12) >>> f'My name is {name}, my age next year is {age+1}, my anniversary is {anniversary:%A, %B %d, %Y}.' 'My name is Fred, my age next year is 51, my anniversary is Saturday, October 12, 1991.' >>> f'He said his name is {name!r}.' "He said his name is 'Fred'." A similar feature was proposed in PEP 215. PEP 215 proposed to support a subset of Python expressions, and did not support the type-specific string formatting (the __format__() method) which was introduced with PEP 3101. Rationale This PEP is driven by the desire to have a simpler way to format strings in Python. The existing ways of formatting are either error prone, inflexible, or cumbersome. %-formatting is limited as to the types it supports. Only ints, strs, and doubles can be formatted. All other types are either not supported, or converted to one of these types before formatting. In addition, there's a well-known trap where a single value is passed: >>> msg = 'disk failure' >>> 'error: %s' % msg 'error: disk failure' But if msg were ever to be a tuple, the same code would fail: >>> msg = ('disk failure', 32) >>> 'error: %s' % msg Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: not all arguments converted during string formatting To be defensive, the following code should be used: >>> 'error: %s' % (msg,) "error: ('disk failure', 32)" str.format() was added to address some of these problems with %-formatting. In particular, it uses normal function call syntax (and therefore supports multiple parameters) and it is extensible through the __format__() method on the object being converted to a string. See PEP 3101 for a detailed rationale. This PEP reuses much of the str.format() syntax and machinery, in order to provide continuity with an existing Python string formatting mechanism. However, str.format() is not without its issues. Chief among them is its verbosity. For example, the text value is repeated here: >>> value = 4 * 20 >>> 'The value is {value}.'.format(value=value) 'The value is 80.'
Even in its simplest form there is a bit of boilerplate, and the value that's inserted into the placeholder is sometimes far removed from where the placeholder is situated: >>> 'The value is {}.'.format(value) 'The value is 80.' With an f-string, this becomes: >>> f'The value is {value}.' 'The value is 80.' F-strings provide a concise, readable way to include the value of Python expressions inside strings. In this sense, string.Template and %-formatting have similar shortcomings to str.format(), but also support fewer formatting options. In particular, they do not support the __format__ protocol, so that there is no way to control how a specific object is converted to a string, nor can it be extended to additional types that want to control how they are converted to strings (such as Decimal and datetime). This example is not possible with string.Template: >>> value = 1234 >>> f'input={value:#06x}' 'input=0x04d2' And neither %-formatting nor string.Template can control formatting such as: >>> date = datetime.date(1991, 10, 12) >>> f'{date} was on a {date:%A}' '1991-10-12 was on a Saturday' No use of globals() or locals() In the discussions on python-dev[4], a number of solutions were presented that used locals() and globals() or their equivalents. All of these have various problems. Among these are referencing variables that are not otherwise used in a closure. Consider: >>> def outer(x): ... def inner(): ... return 'x={x}'.format_map(locals()) ... return inner ... >>> outer(42)() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<stdin>", line 3, in inner KeyError: 'x' This returns an error because the compiler has not added a reference to x inside the closure. You need to manually add a reference to x in order for this to work: >>> def outer(x): ... def inner(): ... x ... return 'x={x}'.format_map(locals()) ... return inner ... >>> outer(42)() 'x=42' In addition, using locals() or globals() introduces an information leak. A called routine that has access to the caller's locals() or globals() has access to far more information than needed to do the string interpolation. Guido stated[5] that any solution to better string interpolation would not use locals() or globals() in its implementation. (This does not forbid users from passing locals() or globals() in, it just doesn't require it, nor does it allow using these functions under the hood.) Specification In source code, f-strings are string literals that are prefixed by the letter 'f' or 'F'. Everywhere this PEP uses 'f', 'F' may also be used. 'f' may be combined with 'r' or 'R', in either order, to produce raw f-string literals. 'f' may not be combined with 'b': this PEP does not propose to add binary f-strings. 'f' may not be combined with 'u'. When tokenizing source files, f-strings use the same rules as normal strings, raw strings, binary strings, and triple quoted strings. That is, the string must end with the same character that it started with: if it starts with a single quote it must end with a single quote, etc. This implies that any code that currently scans Python code looking for strings should be trivially modifiable to recognize f-strings (parsing within an f-string is another matter, of course). Once tokenized, f-strings are parsed into literal strings and expressions. Expressions appear within curly braces '{' and '}'. While scanning the string for expressions, any doubled braces '{{' or '}}' inside literal portions of an f-string are replaced by the corresponding single brace.
Doubled literal opening braces do not signify the start of an expression. A single closing curly brace '}' in the literal portion of a string is an error: literal closing curly braces must be doubled '}}' in order to represent a single closing brace. The parts of the f-string outside of braces are literal strings. These literal portions are then decoded. For non-raw f-strings, this includes converting backslash escapes such as '\n', '\"', "\'", '\xhh', '\uxxxx', '\Uxxxxxxxx', and named unicode characters '\N{name}' into their associated Unicode characters[6]. Backslashes may not appear anywhere within expressions. Comments, using the '#' character, are not allowed inside an expression. Following each expression, an optional type conversion may be specified. The allowed conversions are '!s', '!r', or '!a'. These are treated the same as in str.format(): '!s' calls str() on the expression, '!r' calls repr() on the expression, and '!a' calls ascii() on the expression. These conversions are applied before the call to format(). The only reason to use '!s' is if you want to specify a format specifier that applies to str, not to the type of the expression. F-strings use the same format specifier mini-language as str.format(). Similar to str.format(), optional format specifiers may be included inside the f-string, separated from the expression (or the type conversion, if specified) by a colon. If a format specifier is not provided, an empty string is used. So, an f-string looks like: f ' <text> { <expression> <optional !s, !r, or !a> <optional : format specifier> } <text> ... ' The expression is then formatted using the __format__ protocol, using the format specifier as an argument. The resulting value is used when building the value of the f-string. Note that __format__() is not called directly on each value. The actual code uses the equivalent of type(value).__format__(value, format_spec), or format(value, format_spec). See the documentation of the builtin format() function for more details. Expressions cannot contain ':' or '!' outside of strings or parentheses, brackets, or braces. The exception is that the '!=' operator is allowed as a special case. Escape sequences Backslashes may not appear inside the expression portions of f-strings, so you cannot use them, for example, to escape quotes inside f-strings: >>> f'{\'quoted string\'}' File "<stdin>", line 1 SyntaxError: f-string expression part cannot include a backslash You can use a different type of quote inside the expression: >>> f'{"quoted string"}' 'quoted string' Backslash escapes may appear inside the string portions of an f-string. Note that the correct way to have a literal brace appear in the resulting string value is to double the brace: >>> f'{{ {4*10} }}' '{ 40 }' >>> f'{{{4*10}}}' '{40}' As with all raw strings in Python, no escape processing is done for raw f-strings: >>> fr'x={4*10}\n' 'x=40\\n' Due to Python's string tokenizing rules, the f-string f'abc {a['x']} def' is invalid. The tokenizer parses this as 3 tokens: f'abc {a[', x, and ']} def'. Just like regular strings, this cannot be fixed by using raw strings. There are a number of correct ways to write this f-string: with a different quote character: f"abc {a['x']} def" Or with triple quotes: f'''abc {a['x']} def''' Code equivalence The exact code used to implement f-strings is not specified. However, it is guaranteed that any embedded value that is converted to a string will use that value's __format__ method.
This is the same mechanism that str.format() uses to convert values to strings. For example, this code: f'abc{expr1:spec1}{expr2!r:spec2}def{expr3}ghi' Might be evaluated as: 'abc' + format(expr1, spec1) + format(repr(expr2), spec2) + 'def' + format(expr3) + 'ghi' Expression evaluation The expressions that are extracted from the string are evaluated in the context where the f-string appeared. This means the expression has full access to local and global variables. Any valid Python expression can be used, including function and method calls. Because the f-strings are evaluated where the string appears in the source code, there is no additional expressiveness available with f-strings. There are also no additional security concerns: you could have also just written the same expression, not inside of an f-string: >>> def foo(): ... return 20 ... >>> f'result={foo()}' 'result=20' Is equivalent to: >>> 'result=' + str(foo()) 'result=20' Expressions are parsed with the equivalent of ast.parse('(' + expression + ')', '<fstring>', 'eval')[7]. Note that since the expression is enclosed by implicit parentheses before evaluation, expressions can contain newlines. For example: >>> x = 0 >>> f'''{x ... +1}''' '1' >>> d = {0: 'zero'} >>> f'''{d[0 ... ]}''' 'zero' Format specifiers Format specifiers may also contain evaluated expressions. This allows code such as: >>> width = 10 >>> precision = 4 >>> value = decimal.Decimal('12.34567') >>> f'result: {value:{width}.{precision}}' 'result: 12.35' Once expressions in a format specifier are evaluated (if necessary), format specifiers are not interpreted by the f-string evaluator. Just as in str.format(), they are merely passed in to the __format__() method of the object being formatted. Concatenating strings Adjacent f-strings and regular strings are concatenated. Regular strings are concatenated at compile time, and f-strings are concatenated at run time. For example, the expression: >>> x = 10 >>> y = 'hi' >>> 'a' 'b' f'{x}' '{c}' f'str<{y:^4}>' 'd' 'e' yields the value: 'ab10{c}str< hi >de' While the exact method of this run time concatenation is unspecified, the above code might evaluate to: 'ab' + format(x) + '{c}' + 'str<' + format(y, '^4') + '>de' Each f-string is entirely evaluated before being concatenated to adjacent f-strings. That means that this: >>> f'{x' f'}' Is a syntax error, because the first f-string does not contain a closing brace. Error handling Either compile time or run time errors can occur when processing f-strings. Compile time errors are limited to those errors that can be detected when scanning an f-string. These errors all raise SyntaxError. Unmatched braces: >>> f'x={x' File "<stdin>", line 1 SyntaxError: f-string: expecting '}' Invalid expressions: >>> f'x={!x}' File "<stdin>", line 1 SyntaxError: f-string: empty expression not allowed Run time errors occur when evaluating the expressions inside an f-string. Note that an f-string can be evaluated multiple times, and work sometimes and raise an error at other times: >>> d = {0:10, 1:20} >>> for i in range(3): ... print(f'{i}:{d[i]}') ... 0:10 1:20 Traceback (most recent call last): File "<stdin>", line 2, in <module> KeyError: 2 or: >>> for x in (32, 100, 'fifty'): ... print(f'x = {x:+3}') ... x = +32 x = +100 Traceback (most recent call last): File "<stdin>", line 2, in <module> ValueError: Sign not allowed in string format specifier Leading and trailing whitespace in expressions is ignored For ease of readability, leading and trailing whitespace in expressions is ignored.
This is a by-product of enclosing the expression in parentheses before evaluation. Evaluation order of expressions The expressions in an f-string are evaluated in left-to-right order. This is detectable only if the expressions have side effects: >>> def fn(l, incr): ... result = l[0] ... l[0] += incr ... return result ... >>> lst = [0] >>> f'{fn(lst,2)} {fn(lst,3)}' '0 2' >>> f'{fn(lst,2)} {fn(lst,3)}' '5 7' >>> lst [10] Discussion python-ideas discussion Most of the discussions on python-ideas[8] focused on three issues: - How to denote f-strings, - How to specify the location of expressions in f-strings, and - Whether to allow full Python expressions. How to denote f-strings Because the compiler must be involved in evaluating the expressions contained in the interpolated strings, there must be some way to denote to the compiler which strings should be evaluated. This PEP chose a leading 'f' character preceding the string literal. This is similar to how 'b' and 'r' prefixes change the meaning of the string itself, at compile time. Other prefixes were suggested, such as 'i'. No option seemed better than the others, so 'f' was chosen. Another option was to support special functions, known to the compiler, such as Format(). This seems like too much magic for Python: not only is there a chance for collision with existing identifiers, the PEP author feels that it's better to signify the magic with a string prefix character. How to specify the location of expressions in f-strings This PEP supports the same syntax as str.format() for distinguishing replacement text inside strings: expressions are contained inside braces. There were other options suggested, such as string.Template's $identifier or ${expression}. While $identifier is no doubt more familiar to shell scripters and users of some other languages, in Python str.format() is heavily used. A quick search of Python's standard library shows only a handful of uses of string.Template, but hundreds of uses of str.format(). Another proposed alternative was to have the substituted text between \{ and } or between \{ and \}. While this syntax would probably be desirable if all string literals were to support interpolation, this PEP only supports strings that are already marked with the leading 'f'. As such, the PEP is using unadorned braces to denote substituted text, in order to leverage end user familiarity with str.format(). Supporting full Python expressions Many people on the python-ideas discussion wanted support for either only single identifiers, or a limited subset of Python expressions (such as the subset supported by str.format()). This PEP supports full Python expressions inside the braces. Without full expressions, some desirable usage would be cumbersome. For example: >>> f'Column={col_idx+1}' >>> f'number of items: {len(items)}' would become: >>> col_number = col_idx+1 >>> f'Column={col_number}' >>> n_items = len(items) >>> f'number of items: {n_items}' While it's true that very ugly expressions could be included in the f-strings, this PEP takes the position that such uses should be addressed in a linter or code review: >>> f'mapping is { {a:b for (a, b) in ((1, 2), (3, 4))} }' 'mapping is {1: 2, 3: 4}' Similar support in other languages Wikipedia has a good discussion of string interpolation in other programming languages[9]. This feature is implemented in many languages, with a variety of syntaxes and restrictions.
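As a quick sanity check of the conversion and nested-format-specifier rules described in the Specification above, the following sketch (with illustrative variable names, runnable on Python 3.6 or later) confirms the documented equivalence with the builtin format():

    import decimal

    value = decimal.Decimal('12.34567')
    width, precision = 10, 4

    # Nested expressions inside the format specifier are evaluated first,
    # then the resulting specifier is handed to the value's __format__().
    assert f'{value:{width}.{precision}}' == format(value, f'{width}.{precision}')

    # The !r conversion applies repr() before formatting, as described above.
    name = 'Fred'
    assert f'{name!r:>10}' == format(repr(name), '>10')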
Differences between f-string and str.format expressions There is one small difference between the limited expressions allowed in str.format() and the full expressions allowed inside f-strings. The difference is in how index lookups are performed. In str.format(), index values that do not look like numbers are converted to strings: >>> d = {'a': 10, 'b': 20} >>> 'a={d[a]}'.format(d=d) 'a=10' Notice that the index value is converted to the string 'a' when it is looked up in the dict. However, in f-strings, you would need to use a literal for the value of 'a': >>> f'a={d["a"]}' 'a=10' This difference is required because otherwise you would not be able to use variables as index values: >>> a = 'b' >>> f'a={d[a]}' 'a=20' See[10] for a further discussion. It was this observation that led to full Python expressions being supported in f-strings. Furthermore, the limited expressions that str.format() understands need not be valid Python expressions. For example: >>> '{i[";]}'.format(i={'";':4}) '4' For this reason, the str.format() "expression parser" is not suitable for use when implementing f-strings. Triple-quoted f-strings Triple quoted f-strings are allowed. These strings are parsed just as normal triple-quoted strings are. After parsing and decoding, the normal f-string logic is applied, and __format__() is called on each value. Raw f-strings The 'r' and 'f' prefixes may be combined. For example, they could be used to build up regular expressions: >>> header = 'Subject' >>> fr'{header}:\s+' 'Subject:\\s+' In addition, raw f-strings may be combined with triple-quoted strings. No binary f-strings For the same reason that we don't support bytes.format(), you may not combine 'f' with 'b' string literals. The primary problem is that an object's __format__() method may return Unicode data that is not compatible with a bytes string. Binary f-strings would first require a solution for bytes.format(). This idea has been proposed in the past, most recently in PEP 461 (see its "Proposed variations" section). The discussions of such a feature usually suggest either - adding a method such as __bformat__() so an object can control how it is converted to bytes, or - having bytes.format() not be as general purpose or extensible as str.format(). Both of these remain as options in the future, if such functionality is desired. !s, !r, and !a are redundant The !s, !r, and !a conversions are not strictly required. Because arbitrary expressions are allowed inside the f-strings, this code: >>> a = 'some string' >>> f'{a!r}' "'some string'" Is identical to: >>> f'{repr(a)}' "'some string'" Similarly, !s can be replaced by calls to str() and !a by calls to ascii(). However, !s, !r, and !a are supported by this PEP in order to minimize the differences with str.format(). !s, !r, and !a are required in str.format() because it does not allow the execution of arbitrary expressions. Lambdas inside expressions Because lambdas use the ':' character, they cannot appear outside of parentheses in an expression. The colon is interpreted as the start of the format specifier, which means only the start of the lambda expression is seen, and that is syntactically invalid. As there's no practical use for a plain lambda in an f-string expression, this is not seen as much of a limitation. If you feel you must use lambdas, they may be used inside of parentheses: >>> f'{(lambda x: x*2)(3)}' '6' Can't combine with 'u' The 'u' prefix was added to Python 3.3 in PEP 414 as a means to ease source compatibility with Python 2.7.
Because Python 2.7 will never support f-strings, there is nothing to be gained by being able to combine the 'f' prefix with 'u'. Examples from Python's source code Here are some examples from Python source code that currently use str.format(), and how they would look with f-strings. This PEP does not recommend wholesale converting to f-strings; these are just examples of real-world usages of str.format() and how they'd look if written from scratch using f-strings. Lib/asyncio/locks.py: extra = '{},waiters:{}'.format(extra, len(self._waiters)) extra = f'{extra},waiters:{len(self._waiters)}' Lib/configparser.py: message.append(" [line {0:2d}]".format(lineno)) message.append(f" [line {lineno:2d}]") Tools/clinic/clinic.py: methoddef_name = "{}_METHODDEF".format(c_basename.upper()) methoddef_name = f"{c_basename.upper()}_METHODDEF" python-config.py: print("Usage: {0} [{1}]".format(sys.argv[0], '|'.join('--'+opt for opt in valid_opts)), file=sys.stderr) print(f"Usage: {sys.argv[0]} [{'|'.join('--'+opt for opt in valid_opts)}]", file=sys.stderr) References Copyright This document has been placed in the public domain. [1] %-formatting (https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) [2] str.format (https://docs.python.org/3/library/string.html#formatstrings) [3] string.Template documentation (https://docs.python.org/3/library/string.html#template-strings) [4] Formatting using locals() and globals() (https://mail.python.org/pipermail/python-ideas/2015-July/034671.html) [5] Avoid locals() and globals() (https://mail.python.org/pipermail/python-ideas/2015-July/034701.html) [6] String literal description (https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals) [7] ast.parse() documentation (https://docs.python.org/3/library/ast.html#ast.parse) [8] Start of python-ideas discussion (https://mail.python.org/pipermail/python-ideas/2015-July/034657.html) [9] Wikipedia article on string interpolation (https://en.wikipedia.org/wiki/String_interpolation) [10] Differences in str.format() and f-string expressions (https://mail.python.org/pipermail/python-ideas/2015-July/034726.html)
PEP: 531 Title: Existence checking operators Version: $Revision$ Last-Modified: $Date$ Author: Alyssa Coghlan <[email protected]> Status: Withdrawn Type: Standards Track Content-Type: text/x-rst Created: 25-Oct-2016 Python-Version: 3.7 Post-History: 28-Oct-2016 Abstract Inspired by PEP 505 and the related discussions, this PEP proposes the addition of two new control flow operators to Python: - Existence-checking precondition ("exists-then"): expr1 ?then expr2 - Existence-checking fallback ("exists-else"): expr1 ?else expr2 as well as the following abbreviations for common existence checking expressions and statements: - Existence-checking attribute access: obj?.attr (for obj ?then obj.attr) - Existence-checking subscripting: obj?[expr] (for obj ?then obj[expr]) - Existence-checking assignment: value ?= expr (for value = value ?else expr) The common ? symbol in these new operator definitions indicates that they use a new "existence checking" protocol rather than the established truth-checking protocol used by if statements, while loops, comprehensions, generator expressions, conditional expressions, logical conjunction, and logical disjunction. This new protocol would be made available as operator.exists, with the following characteristics: - types can define a new __exists__ magic method (Python) or tp_exists slot (C) to override the default behaviour. This optional method has the same signature and possible return values as __bool__. - operator.exists(None) returns False - operator.exists(NotImplemented) returns False - operator.exists(Ellipsis) returns False - float, complex and decimal.Decimal will override the existence check such that NaN values return False and other values (including zero values) return True - for any other type, operator.exists(obj) returns True by default. Most importantly, values that evaluate to False in a truth checking context (zeroes, empty containers) will still evaluate to True in an existence checking context PEP Withdrawal When posting this PEP for discussion on python-ideas[1], I asked reviewers to consider 3 high level design questions before moving on to considering the specifics of this particular syntactic proposal: 1. Do we collectively agree that "existence checking" is a useful general concept that exists in software development and is distinct from the concept of "truth checking"? 2. Do we collectively agree that the Python ecosystem would benefit from an existence checking protocol that permits generalisation of algorithms (especially short circuiting ones) across different "data missing" indicators, including those defined in the language definition, the standard library, and custom user code? 3. Do we collectively agree that it would be easier to use such a protocol effectively if existence-checking equivalents to the truth-checking "and" and "or" control flow operators were available? While the answers to the first question were generally positive, it quickly became clear that the answer to the second question is "No". Steven D'Aprano articulated the counter-argument well in[2], but the general idea is that when checking for "missing data" sentinels, we're almost always looking for a specific sentinel value, rather than any sentinel value. NotImplemented exists, for example, due to None being a potentially legitimate result from overloaded arithmetic operators and exception handling imposing too much runtime overhead to be useful for operand coercion. 
Similarly, Ellipsis exists for multi-dimensional slicing support due to None already having another meaning in a slicing context (indicating the use of the default start or stop indices, or the default step size). In mathematical contexts, the value of NaN is that programmatically it behaves like a normal value of its type (e.g. exposing all the usual attributes and methods), while arithmetically it behaves according to the mathematical rules for handling NaN values. With that core design concept invalidated, the proposal as a whole doesn't make sense, and it is accordingly withdrawn. However, the discussion of the proposal did prompt consideration of a potential protocol-based approach to make the existing and, or and if-else operators more flexible[3] without introducing any new syntax, so I'll be writing that up as another possible alternative to PEP 505. Relationship with other PEPs While this PEP was inspired by and builds on Mark Haase's excellent work in putting together PEP 505, it ultimately competes with that PEP due to significant differences in the specifics of the proposed syntax and semantics for the feature. It also presents a different perspective on the rationale for the change by focusing on the benefits to existing Python users as the typical demands of application and service development activities are genuinely changing. It isn't an accident that similar features are now appearing in multiple programming languages, and while it's a good idea for us to learn from how other language designers are handling the problem, precedents being set elsewhere are more relevant to how we would go about tackling this problem than they are to whether or not we think it's a problem we should address in the first place. Rationale Existence checking expressions An increasingly common requirement in modern software development is the need to work with "semi-structured data": data where the structure of the data is known in advance, but pieces of it may be missing at runtime, and the software manipulating that data is expected to degrade gracefully (e.g. by omitting results that depend on the missing data) rather than failing outright. Some particularly common cases where this issue arises are: - handling optional application configuration settings and function parameters - handling external service failures in distributed systems - handling data sets that include some partial records It is the latter two cases that are the primary motivation for this PEP - while needing to deal with optional configuration settings and parameters is a design requirement at least as old as Python itself, the rise of public cloud infrastructure, the development of software systems as collaborative networks of distributed services, and the availability of large public and private data sets for analysis means that the ability to degrade operations gracefully in the face of partial service failures or partial data availability is becoming an essential feature of modern programming environments.
At the moment, writing such software in Python can be genuinely awkward, as your code ends up littered with expressions like: - value1 = expr1.field.of.interest if expr1 is not None else None - value2 = expr2["field"]["of"]["interest"] if expr2 is not None else None - value3 = expr3 if expr3 is not None else expr4 if expr4 is not None else expr5 If these are only occasional, then expanding out to full statement forms may help improve readability, but if you have 4 or 5 of them in a row (which is a fairly common situation in data transformation pipelines), then replacing them with 16 or 20 lines of conditional logic really doesn't help matters. Expanding the three examples above that way hopefully helps illustrate that: if expr1 is not None: value1 = expr1.field.of.interest else: value1 = None if expr2 is not None: value2 = expr2["field"]["of"]["interest"] else: value2 = None if expr3 is not None: value3 = expr3 else: if expr4 is not None: value3 = expr4 else: value3 = expr5 The combined impact of the proposals in this PEP is to allow the above sample expressions to instead be written as: - value1 = expr1?.field.of.interest - value2 = expr2?["field"]["of"]["interest"] - value3 = expr3 ?else expr4 ?else expr5 In these forms, almost all of the information presented to the reader is immediately relevant to the question "What does this code do?", while the boilerplate code to handle missing data by passing it through to the output or falling back to an alternative input, has shrunk to two uses of the ? symbol and two uses of the ?else keyword. In the first two examples, the 31 character boilerplate clause if exprN is not None else None (minimally 27 characters for a single letter variable name) has been replaced by a single ? character, substantially improving the signal-to-pattern-noise ratio of the lines (especially if it encourages the use of more meaningful variable and field names rather than making them shorter purely for the sake of expression brevity). In the last example, two instances of the 21 character boilerplate, if exprN is not None (minimally 17 characters) are replaced with single characters, again substantially improving the signal-to-pattern-noise ratio. Furthermore, each of our 5 "subexpressions of potential interest" is included exactly once, rather than 4 of them needing to be duplicated or pulled out to a named variable in order to first check if they exist. The existence checking precondition operator is mainly defined to provide a clear conceptual basis for the existence checking attribute access and subscripting operators: - obj?.attr is roughly equivalent to obj ?then obj.attr - obj?[expr] is roughly equivalent to obj ?then obj[expr] The main semantic difference between the shorthand forms and their expanded equivalents is that the common subexpression to the left of the existence checking operator is evaluated only once in the shorthand form (similar to the benefit offered by augmented assignment statements). 
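The shorthand forms above were never implemented, but the pattern they abbreviate can be emulated in current Python with a small helper; in this sketch the maybe() name is invented purely for illustration and is not part of the proposal:

    def maybe(obj, accessor):
        # Emulates "obj ?then accessor(obj)" for the None sentinel only;
        # the proposed protocol would generalise this to other sentinels.
        return accessor(obj) if obj is not None else None

    record = {"field": {"of": {"interest": 42}}}
    assert maybe(record, lambda r: r["field"]["of"]["interest"]) == 42
    assert maybe(None, lambda r: r["field"]) is None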
Existence checking assignment Existence-checking assignment is proposed as a relatively straightforward expansion of the concepts in this PEP to also cover the common configuration handling idiom: - value = value if value is not None else expensive_default() by allowing that to instead be abbreviated as: - value ?= expensive_default() This is mainly beneficial when the target is a subscript operation or subattribute, as even without this specific change, the PEP would still permit this idiom to be updated to: - value = value ?else expensive_default() The main argument against adding this form is that it's arguably ambiguous and could mean either: - value = value ?else expensive_default(); or - value = value ?then value.subfield.of.interest The second form isn't at all useful, but if this concern was deemed significant enough to address while still keeping the augmented assignment feature, the full keyword could be included in the syntax: - value ?else= expensive_default() Alternatively, augmented assignment could just be dropped from the current proposal entirely and potentially reconsidered at a later date. Existence checking protocol The existence checking protocol is included in this proposal primarily to allow for proxy objects (e.g. local representations of remote resources) and mock objects used in testing to correctly indicate non-existence of target resources, even though the proxy or mock object itself is not None. However, with that protocol defined, it then seems natural to expand it to provide a type independent way of checking for NaN values in numeric types - at the moment you need to be aware of the exact data type you're working with (e.g. builtin floats, builtin complex numbers, the decimal module) and use the appropriate operation (e.g. math.isnan, cmath.isnan, decimal.getcontext().is_nan(), respectively) Similarly, it seems reasonable to declare that the other placeholder builtin singletons, Ellipsis and NotImplemented, also qualify as objects that represent the absence of data more so than they represent data. Proposed symbolic notation Python has historically only had one kind of implied boolean context: truth checking, which can be invoked directly via the bool() builtin. As this PEP proposes a new kind of control flow operation based on existence checking rather than truth checking, it is considered valuable to have a reminder directly in the code when existence checking is being used rather than truth checking. The mathematical symbol for existence assertions is U+2203 'THERE EXISTS': ∃ Accordingly, one possible approach to the syntactic additions proposed in this PEP would be to use that already defined mathematical notation: - expr1 ∃then expr2 - expr1 ∃else expr2 - obj∃.attr - obj∃[expr] - target ∃= expr However, there are two major problems with that approach, one practical, and one pedagogical. The practical problem is the usual one that most keyboards don't offer any easy way of entering mathematical symbols other than those used in basic arithmetic (even the symbols appearing in this PEP were ultimately copied & pasted from[4] rather than being entered directly). The pedagogical problem is that the symbols for existence assertions (∃) and universal assertions (∀) aren't going to be familiar to most people the way basic arithmetic operators are, so we wouldn't actually be making the proposed syntax easier to understand by adopting ∃. By contrast, ?
is one of the few remaining unused ASCII punctuation characters in Python's syntax, making it available as a candidate syntactic marker for "this control flow operation is based on an existence check, not a truth check". Taking that path would also have the advantage of aligning Python's syntax with corresponding syntax in other languages that offer similar features. Drawing from the existing summary in PEP 505 and the Wikipedia articles on the "safe navigation operator"[5] and the "null coalescing operator"[6], we see: - The ?. existence checking attribute access syntax precisely aligns with: - the "safe navigation" attribute access operator in C# (?.) - the "optional chaining" operator in Swift (?.) - the "safe navigation" attribute access operator in Groovy (?.) - the "conditional member access" operator in Dart (?.) - The ?[] existence checking attribute access syntax precisely aligns with: - the "safe navigation" subscript operator in C# (?[]) - the "optional subscript" operator in Swift (?[]) - The ?else existence checking fallback syntax semantically aligns with: - the "null-coalescing" operator in C# (??) - the "null-coalescing" operator in PHP (??) - the "nil-coalescing" operator in Swift (??) To be clear, these aren't the only spellings of these operators used in other languages, but they're the most common ones, and the ? symbol is the most common syntactic marker by far (presumably prompted by the use of ? to introduce the "then" clause in C-style conditional expressions, which many of these languages also offer). Proposed keywords Given the symbolic marker ?, it would be syntactically unambiguous to spell the existence checking precondition and fallback operations using the same keywords as their truth checking counterparts: - expr1 ?and expr2 (instead of expr1 ?then expr2) - expr1 ?or expr2 (instead of expr1 ?else expr2) However, while syntactically unambiguous when written, this approach makes the code incredibly hard to pronounce (What's the pronunciation of "?"?) and also hard to describe (given reused keywords, there's no obvious shorthand terms for "existence checking precondition (?and)" and "existence checking fallback (?or)" that would distinguish them from "logical conjunction (and)" and "logical disjunction (or)"). We could try to encourage folks to pronounce the ? symbol as "exists", making the shorthand names the "exists-and expression" and the "exists-or expression", but there'd be no way of guessing those names purely from seeing them written in a piece of code. Instead, this PEP takes advantage of the proposed symbolic syntax to introduce a new keyword (?then) and borrow an existing one (?else) in a way that allows people to refer to "then expressions" and "else expressions" without ambiguity. These keywords also align well with the conditional expressions that are semantically equivalent to the proposed expressions. For ?else expressions, expr1 ?else expr2 is equivalent to: _lhs_result = expr1 _lhs_result if operator.exists(_lhs_result) else expr2 Here the parallel is clear, since the else expr2 appears at the end of both the abbreviated and expanded forms.
For ?then expressions, expr1 ?then expr2 is equivalent to: _lhs_result = expr1 expr2 if operator.exists(_lhs_result) else _lhs_result Here the parallel isn't as immediately obvious due to Python's traditionally anonymous "then" clauses (introduced by : in if statements and suffixed by if in conditional expressions), but it's still reasonably clear as long as you're already familiar with the "if-then-else" explanation of conditional control flow. Risks and concerns Readability Learning to read and write the new syntax effectively mainly requires internalising two concepts: - expressions containing ? include an existence check and may short circuit - if None or another "non-existent" value is an expected input, and the correct handling is to propagate that to the result, then the existence checking operators are likely what you want Currently, these concepts aren't explicitly represented at the language level, so it's a matter of learning to recognise and use the various idiomatic patterns based on conditional expressions and statements. Magic syntax There's nothing about ? as a syntactic element that inherently suggests is not None or operator.exists. The main current use of ? as a symbol in Python code is as a trailing suffix in IPython environments to request help information for the result of the preceding expression. However, the notion of existence checking really does benefit from a pervasive visual marker that distinguishes it from truth checking, and that calls for a single-character symbolic syntax if we're going to do it at all. Conceptual complexity This proposal takes the currently ad hoc and informal concept of "existence checking" and elevates it to the status of being a syntactic language feature with a clearly defined operator protocol. In many ways, this should actually reduce the overall conceptual complexity of the language, as many more expectations will map correctly between truth checking with bool(expr) and existence checking with operator.exists(expr) than currently map between truth checking and existence checking with expr is not None (or expr is not NotImplemented in the context of operand coercion, or the various NaN-checking operations in mathematical libraries). As a simple example of the new parallels introduced by this PEP, compare: all_are_true = all(map(bool, iterable)) at_least_one_is_true = any(map(bool, iterable)) all_exist = all(map(operator.exists, iterable)) at_least_one_exists = any(map(operator.exists, iterable)) Design Discussion Subtleties in chaining existence checking expressions Similar subtleties arise in chaining existence checking expressions as already exist in chaining logical operators: the behaviour can be surprising if the right hand side of one of the expressions in the chain itself returns a value that doesn't exist. As a result, value = arg1 ?then f(arg1) ?else default() would be dubious for essentially the same reason that value = cond and expr1 or expr2 is dubious: the former will evaluate default() if f(arg1) returns None, just as the latter will evaluate expr2 if expr1 evaluates to False in a boolean context. 
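Both expansions above rely on operator.exists(); as a rough Python model of its documented behaviour (a sketch of this withdrawn proposal, not a real API, omitting the complex and decimal.Decimal NaN cases for brevity):

    import math

    _NON_EXISTENT = (None, NotImplemented, Ellipsis)

    def exists(value):
        # Identity checks against the placeholder singletons.
        if any(value is sentinel for sentinel in _NON_EXISTENT):
            return False
        # NaN values also report themselves as non-existent.
        if isinstance(value, float) and math.isnan(value):
            return False
        return True

    # Falsey-but-existing values diverge from bool(), as the Abstract notes.
    assert exists(0) and exists('') and exists([])
    assert not exists(None) and not exists(float('nan'))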
Ambiguous interaction with conditional expressions In the proposal as currently written, the following is a syntax error: - value = f(arg) if arg ?else default While the following is a valid operation that checks a second condition if the first doesn't exist rather than merely being false: - value = expr1 if cond1 ?else cond2 else expr2 The expression chaining problem described above means that the argument can be made that the first operation should instead be equivalent to: - value = f(arg) if operator.exists(arg) else default requiring the second to be written in the arguably clearer form: - value = expr1 if (cond1 ?else cond2) else expr2 Alternatively, the first form could remain a syntax error, and the existence checking symbol could instead be attached to the if keyword: - value = expr1 if? cond else expr2 Existence checking in other truth-checking contexts The truth-checking protocol is currently used in the following syntactic constructs: - logical conjunction (and-expressions) - logical disjunction (or-expressions) - conditional expressions (if-else expressions) - if statements - while loops - filter clauses in comprehensions and generator expressions In the current PEP, switching from truth-checking with and and or to existence-checking is a matter of substituting in the new keywords, ?then and ?else in the appropriate places. For other truth-checking contexts, it proposes either importing and using the operator.exists API, or else continuing with the current idiom of checking specifically for expr is not None (or the context appropriate equivalent). The simplest possible enhancement in that regard would be to elevate the proposed exists() API from an operator module function to a new builtin function. Alternatively, the ? existence checking symbol could be supported as a modifier on the if and while keywords to indicate the use of an existence check rather than a truth check. However, it isn't at all clear that the potential consistency benefits gained for either suggestion would justify the additional disruption, so they've currently been omitted from the proposal. Defining expected invariant relations between __bool__ and __exists__ The PEP currently leaves the definition of __bool__ on all existing types unmodified, which ensures the entire proposal remains backwards compatible, but results in the following cases where bool(obj) returns True, but the proposed operator.exists(obj) would return False: - NaN values for float, complex, and decimal.Decimal - Ellipsis - NotImplemented The main argument for potentially changing these is that it becomes easier to reason about potential code behaviour if we have a recommended invariant in place saying that values which indicate they don't exist in an existence checking context should also report themselves as being False in a truth checking context. Failing to define such an invariant would lead to arguably odd outcomes like float("NaN") ?else 0.0 returning 0.0 while float("NaN") or 0.0 returns NaN. 
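The truth-checking half of that discrepancy is easy to demonstrate in current Python; only the ?else line below is hypothetical syntax from this withdrawn proposal:

    nan = float('nan')

    # Today: NaN is truthy, so logical `or` short-circuits and keeps it.
    print(nan or 0.0)    # nan

    # Proposed: NaN would fail the existence check, so the fallback fires:
    #     nan ?else 0.0  # would yield 0.0 (hypothetical syntax)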
Limitations Arbitrary sentinel objects This proposal doesn't attempt to provide syntactic support for the "sentinel object" idiom, where None is a permitted explicit value, so a separate sentinel object is defined to indicate missing values: _SENTINEL = object() def f(obj=_SENTINEL): return obj if obj is not _SENTINEL else default_value() This could potentially be supported at the expense of making the existence protocol definition significantly more complex, both to define and to use: - at the Python layer, operator.exists and __exists__ implementations would return the empty tuple to indicate non-existence, and otherwise return a singleton tuple containing a reference to the object to be used as the result of the existence check - at the C layer, tp_exists implementations would return NULL to indicate non-existence, and otherwise return a PyObject * pointer as the result of the existence check Given that change, the sentinel object idiom could be rewritten as: class Maybe: SENTINEL = object() def __init__(self, value): self._result = (value,) if value is not self.SENTINEL else () def __exists__(self): return self._result def f(obj=Maybe.SENTINEL): return Maybe(obj) ?else default_value() However, I don't think cases where the 3 proposed standard sentinel values (i.e. None, Ellipsis and NotImplemented) can't be used are going to be anywhere near common enough for the additional protocol complexity and the loss of symmetry between __bool__ and __exists__ to be worth it. Specification The Abstract already gives the gist of the proposal and the Rationale gives some specific examples. If there's enough interest in the basic idea, then a full specification will need to provide a precise correspondence between the proposed syntactic sugar and the underlying conditional expressions that is sufficient to guide the creation of a reference implementation. ...TBD... Implementation As with PEP 505, actual implementation has been deferred pending in-principle interest in the idea of adding these operators - the implementation isn't the hard part of these proposals, the hard part is deciding whether or not this is a change where the long term benefits for new and existing Python users outweigh the short term costs involved in the wider ecosystem (including developers of other implementations, language curriculum developers, and authors of other Python related educational material) adjusting to the change. ...TBD... References Copyright This document has been placed in the public domain under the terms of the CC0 1.0 license: https://creativecommons.org/publicdomain/zero/1.0/ [1] python-ideas discussion thread (https://mail.python.org/pipermail/python-ideas/2016-October/043415.html) [2] Steven D'Aprano's critique of the proposal (https://mail.python.org/pipermail/python-ideas/2016-October/043453.html) [3] Considering a link to the idea of overloadable Boolean operators (https://mail.python.org/pipermail/python-ideas/2016-October/043447.html) [4] FileFormat.info: Unicode Character 'THERE EXISTS' (U+2203) (http://www.fileformat.info/info/unicode/char/2203/index.htm) [5] Wikipedia: Safe navigation operator (https://en.wikipedia.org/wiki/Safe_navigation_operator) [6] Wikipedia: Null coalescing operator (https://en.wikipedia.org/wiki/Null_coalescing_operator)
PEP: 645 Title: Allow writing optional types as x? Author: Maggie Moss <[email protected]> Sponsor: Guido van Rossum <[email protected]> Status: Withdrawn Type: Standards Track Content-Type: text/x-rst Created: 25-Aug-2020 Resolution: https://mail.python.org/archives/list/[email protected]/message/E75SPV6DDHLEEFSA5MBN5HUOQWDMUQJ2/ Abstract This PEP proposes adding a ? operator for types to allow writing int? in place of Optional[int]. PEP Withdrawal The notation T|None introduced by PEP 604 to write Optional[T] is a fine alternative to T? and does not require new syntax. Using T? to mean T|None is also inconsistent with TypeScript where it roughly means NotRequired[T]. Such inconsistency would likely confuse folks coming from TypeScript to Python. The above represents the consensus of typing-sig and the sponsor of this PEP. Motivation Types have become a valuable and powerful part of the Python language. However, many type annotations are verbose and add considerable friction to using type annotations. By improving the typing syntax, adding types to Python code becomes simpler and improves the development experience for Python users. In a similar vein, a PEP to introduce shorthand syntax for Union types (PEP 604) has been approved and implemented. Rationale Types in Python can be quite verbose; this can be a hindrance when working towards type adoption. Making types more ergonomic, as was done with the Union type in PEP 604 (e.g., int | str), would reduce the effort needed to add types to new and existing Python code. The Optional annotation is used frequently in both partially and fully typed Python code bases. In a small sampling of 5 well-typed open source projects, on average 7% of annotations included at least one optional type. This indicates that updating the syntax has the potential to make types more concise, reduce code length and improve readability. Simplifying the syntax for optionals has been discussed previously within the typing community. The consensus during these conversations has been that ? is the preferred operator. There is no native support for unary ? in Python and this will need to be added to the runtime. Adding the ? sigil to the Python grammar has been proposed previously in PEP 505, which is currently in a deferred state. PEP 505 proposes: - a "None coalescing" binary operator ?? - a "None-aware attribute access" operator ?. ("maybe dot") - a "None-aware indexing" operator ?[] ("maybe subscript") Should PEP 505 be approved in the future, it would not interfere with the typing-specific ? proposed in this PEP. As well, since all uses of the ? would be conceptually related, it would not be confusing in terms of learning Python or a hindrance to quick visual comprehension. The proposed syntax, with the postfix operator, mimics the optional syntax found in other typed languages, like C#, TypeScript and Swift. The widespread adoption and popularity of these languages means that Python developers are likely already familiar with this syntax. // Optional in Swift var example: String? // Optional in C# string? example; Adding this syntax would also follow the often used pattern of using builtin types as annotations. For example, list, dict and None. This would allow more annotations to be added to Python code without importing from typing. Specification The new optional syntax should be accepted for function, variable, attribute and parameter annotations. # instead of # def foo(x: Optional[int], y: Optional[str], z: Optional[list[int]]): ...
def foo(x: int?, y: str?, z: list[int]?): ... # def bar(x: list[typing.Optional[int]]): ... def bar(x: list[int?]): ... The new optional syntax should be equivalent to the existing typing.Optional syntax typing.Optional[int] == int? The new optional syntax should have the same identity as the existing typing.Optional syntax. typing.Optional[int] is int? It should also be equivalent to a Union with None. # old syntax int? == typing.Union[int, None] # new syntax int? == int | None Since the new Union syntax specified in PEP 604 is supported in isinstance and issubclass, the new optional syntax should be supported in both isinstance and issubclass, isinstance(1, int?) # true issubclass(Child, Super?) # true A new dunder method will need to be implemented to allow the ? operator to be overloaded for other functionality. Backwards Compatibility ? is currently unused in Python syntax; therefore this PEP is fully backwards compatible. Reference Implementation A reference implementation can be found here. Rejected Ideas Discussed alternatives were - The ~ operator was considered in place of ?. - A prefix operator (?int). Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 476 Title: Enabling certificate verification by default for stdlib http clients Version: $Revision$ Last-Modified: $Date$ Author: Alex Gaynor <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 28-Aug-2014 Python-Version: 2.7.9, 3.4.3, 3.5 Resolution: https://mail.python.org/pipermail/python-dev/2014-October/136676.html Abstract Currently when a standard library http client (the urllib, urllib2, http, and httplib modules) encounters an https:// URL it will wrap the network HTTP traffic in a TLS stream, as is necessary to communicate with such a server. However, during the TLS handshake it will not actually check that the X509 certificate the server presents is signed by a CA in any trust root, nor will it verify that the Common Name (or Subject Alternate Name) on the presented certificate matches the requested host. The failure to do these checks means that anyone with a privileged network position is able to trivially execute a man-in-the-middle attack against a Python application using either of these HTTP clients, and change traffic at will. This PEP proposes to enable verification of X509 certificate signatures, as well as hostname verification for Python's HTTP clients by default, subject to opt-out on a per-call basis. This change would be applied to Python 2.7, Python 3.4, and Python 3.5. Rationale The "S" in "HTTPS" stands for secure. When Python's users type "HTTPS" they are expecting a secure connection, and Python should adhere to a reasonable standard of care in delivering this. Currently we are failing at this, and in doing so, APIs which appear simple are misleading users. When asked, many Python users state that they were not aware that Python failed to perform these validations, and are shocked. The popularity of requests (which enables these checks by default) demonstrates that these checks are not overly burdensome in any way, and the fact that it is widely recommended as a major security improvement over the standard library clients demonstrates that many expect a higher standard for "security by default" from their tools. The failure of various applications to note Python's negligence in this matter is a source of regular CVE assignment[1][2][3][4][5][6][7][8][9][10][11]. Technical Details Python would use the system provided certificate database on all platforms. Failure to locate such a database would be an error, and users would need to explicitly specify a location to fix it. This will be achieved by adding a new ssl._create_default_https_context function, which is the same as ssl.create_default_context. http.client can then replace its usage of ssl._create_stdlib_context with the ssl._create_default_https_context. Additionally ssl._create_stdlib_context is renamed ssl._create_unverified_context (an alias is kept around for backwards compatibility reasons). Trust database This PEP proposes using the system-provided certificate database. Previous discussions have suggested bundling Mozilla's certificate database and using that by default. This was decided against for several reasons: - Using the platform trust database imposes a lower maintenance burden on the Python developers -- shipping our own trust database would require doing a release every time a certificate was revoked. - Linux vendors, and other downstreams, would unbundle the Mozilla certificates, resulting in a more fragmented set of behaviors. - Using the platform stores makes it easier to handle situations such as corporate internal CAs.
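For the corporate-internal-CA case mentioned above, the verified default can be extended with additional trust roots rather than disabled; this sketch uses the Python 3 spelling and only documented stdlib calls, and the certificate path is a placeholder rather than a real file:

    import ssl
    import urllib.request

    # Start from the verifying default context this PEP enables...
    context = ssl.create_default_context()
    # ...then add an internal CA to its trust roots if required
    # (the path below is a placeholder):
    # context.load_verify_locations(cafile='/etc/pki/internal-ca.pem')

    with urllib.request.urlopen('https://example.com/', context=context) as resp:
        print(resp.status)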
OpenSSL also has a pair of environment variables, SSL_CERT_DIR and SSL_CERT_FILE which can be used to point Python at a different certificate database. Backwards compatibility This change will have the appearance of causing some HTTPS connections to "break", because they will now raise an Exception during handshake. This is misleading, however: in fact these connections are presently failing silently; an HTTPS URL indicates an expectation of confidentiality and authentication. The fact that Python does not actually verify that the user's request has been made securely is a bug, further: "Errors should never pass silently." Nevertheless, users who have a need to access servers with self-signed or incorrect certificates would be able to do so by providing a context with custom trust roots or which disables validation (documentation should strongly recommend the former where possible). Users will also be able to add necessary certificates to system trust stores in order to trust them globally. Twisted's 14.0 release made this same change, and it has been met with almost no opposition. Opting out For users who wish to opt out of certificate verification on a single connection, they can achieve this by providing the context argument to urllib.urlopen: import ssl # This restores the same behavior as before. context = ssl._create_unverified_context() urllib.urlopen("https://no-valid-cert", context=context) It is also possible, though highly discouraged, to globally disable verification by monkeypatching the ssl module in versions of Python that implement this PEP: import ssl try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default pass else: # Handle target environment that doesn't support HTTPS verification ssl._create_default_https_context = _create_unverified_https_context This guidance is aimed primarily at system administrators that wish to adopt newer versions of Python that implement this PEP in legacy environments that do not yet support certificate verification on HTTPS connections. For example, an administrator may opt out by adding the monkeypatch above to sitecustomize.py in their Standard Operating Environment for Python. Applications and libraries SHOULD NOT be making this change process wide (except perhaps in response to a system administrator controlled configuration setting). Particularly security sensitive applications should always provide an explicit application defined SSL context rather than relying on the default behaviour of the underlying Python implementation. Other protocols This PEP only proposes requiring this level of validation for HTTP clients, not for other protocols such as SMTP. This is because while a high percentage of HTTPS servers have correct certificates, as a result of the validation performed by browsers, for other protocols self-signed or otherwise incorrect certificates are far more common. Note that for SMTP at least, this appears to be changing and should be reviewed for a potential similar PEP in the future: - https://www.facebook.com/notes/protect-the-graph/the-current-state-of-smtp-starttls-deployment/1453015901605223 - https://www.facebook.com/notes/protect-the-graph/massive-growth-in-smtp-starttls-deployment/1491049534468526 Python Versions This PEP describes changes that will occur on both the 3.4.x, 3.5 and 2.7.X branches.
For 2.7.X this will require backporting the context (SSLContext) argument to httplib, in addition to the features already backported in PEP 466. Implementation - LANDED: Issue 22366 adds the context argument to urllib.request.urlopen. - Issue 22417 implements the substance of this PEP. Copyright This document has been placed into the public domain. [1] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-4340 [2] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-3533 [3] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-5822 [4] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-5825 [5] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-1909 [6] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2037 [7] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2073 [8] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2191 [9] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-4111 [10] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-6396 [11] https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-6444
PEP: 339 Title: Design of the CPython Compiler Version: $Revision$ Last-Modified: $Date$ Author: Brett Cannon <[email protected]> Status: Withdrawn Type: Informational Content-Type: text/x-rst Created: 02-Feb-2005 Post-History: Note This PEP has been withdrawn and moved to the Python developer's guide. Abstract Historically (through 2.4), compilation from source code to bytecode involved two steps: 1. Parse the source code into a parse tree (Parser/pgen.c) 2. Emit bytecode based on the parse tree (Python/compile.c) However, this is not how a standard compiler works. The usual steps for compilation are: 1. Parse source code into a parse tree (Parser/pgen.c) 2. Transform parse tree into an Abstract Syntax Tree (Python/ast.c) 3. Transform AST into a Control Flow Graph (Python/compile.c) 4. Emit bytecode based on the Control Flow Graph (Python/compile.c) Starting with Python 2.5, the above steps are now used. This change was done to simplify compilation by breaking it into three steps. The purpose of this document is to outline how the latter three steps of the process work. This document does not touch on how parsing works beyond what is needed to explain what is needed for compilation. It is also not exhaustive in terms of how the entire system works. You will most likely need to read some source to have an exact understanding of all details. Parse Trees Python's parser is an LL(1) parser mostly based on the implementation laid out in the Dragon Book [Aho86]. The grammar file for Python can be found in Grammar/Grammar, with the numeric values of the grammar rules stored in Include/graminit.h. The numeric values for types of tokens (literal tokens, such as :, numbers, etc.) are kept in Include/token.h. The parse tree is made up of node * structs (as defined in Include/node.h). Querying data from the node structs can be done with the following macros (which are all defined in Include/node.h): - CHILD(node *, int) Returns the nth child of the node using zero-offset indexing - RCHILD(node *, int) Returns the nth child of the node from the right side; use negative numbers! - NCH(node *) Number of children the node has - STR(node *) String representation of the node; e.g., will return : for a COLON token - TYPE(node *) The type of node as specified in Include/graminit.h - REQ(node *, TYPE) Assert that the node is the type that is expected - LINENO(node *) Retrieves the line number of the source code that led to the creation of the parse rule; defined in Python/ast.c To tie all of this together, consider the rule for 'while': while_stmt: 'while' test ':' suite ['else' ':' suite] The node representing this will have TYPE(node) == while_stmt and the number of children can be 4 or 7 depending on whether there is an 'else' clause. To access what should be the first ':' and require that it be an actual ':' token, use REQ(CHILD(node, 2), COLON). Abstract Syntax Trees (AST) The abstract syntax tree (AST) is a high-level representation of the program structure without the necessity of containing the source code; it can be thought of as an abstract representation of the source code. The AST nodes are specified using the Zephyr Abstract Syntax Definition Language (ASDL) [Wang97]. The definition of the AST nodes for Python is found in the file Parser/Python.asdl. Each AST node (representing statements, expressions, and several specialized types, like list comprehensions and exception handlers) is defined by the ASDL.
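Although this PEP predates it, modern CPython exposes the same node definitions at the Python level through the ast module, which can be a convenient way to read the ASDL; for example, the while_stmt grammar rule discussed above maps onto a single While AST node:

    import ast

    tree = ast.parse("while x:\n    pass\nelse:\n    pass\n")
    # The While node carries the test, body, and optional orelse fields
    # that the 4-or-7-children parse tree for while_stmt collapses into.
    print(ast.dump(tree.body[0]))
    # While(test=Name(id='x', ctx=Load()), body=[Pass()], orelse=[Pass()])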
Most definitions in the AST correspond to a particular source construct, such as an 'if' statement or an attribute lookup. The definition is independent of its realization in any particular programming language. The following fragment of the Python ASDL demonstrates the approach and syntax: module Python { stmt = FunctionDef(identifier name, arguments args, stmt* body, expr* decorators) | Return(expr? value) | Yield(expr value) attributes (int lineno) } The preceding example describes three different kinds of statements: function definitions, return statements, and yield statements. All three kinds are considered of type stmt, as shown by the '|' separating the various kinds. They all take arguments of various kinds and amounts. Modifiers on the argument type specify the number of values needed: '?' means it is optional, '*' means 0 or more, and no modifier means exactly one required value for the argument. FunctionDef, for instance, takes an identifier for the name, 'arguments' for args, zero or more stmt arguments for 'body', and zero or more expr arguments for 'decorators'. Note that something like 'arguments', which is a node type, is represented as a single AST node and not as a sequence of nodes, as one might expect. All three kinds also have an 'attributes' argument; this is shown by the fact that 'attributes' lacks a '|' before it. The statement definitions above generate the following C structure type: typedef struct _stmt *stmt_ty; struct _stmt { enum { FunctionDef_kind=1, Return_kind=2, Yield_kind=3 } kind; union { struct { identifier name; arguments_ty args; asdl_seq *body; } FunctionDef; struct { expr_ty value; } Return; struct { expr_ty value; } Yield; } v; int lineno; } Also generated are a series of constructor functions that allocate (in this case) a stmt_ty struct with the appropriate initialization. The 'kind' field specifies which component of the union is initialized. The FunctionDef() constructor function sets 'kind' to FunctionDef_kind and initializes the 'name', 'args', 'body', and 'attributes' fields. Memory Management Before discussing the actual implementation of the compiler, a discussion of how memory is handled is in order. To make memory management simple, an arena is used. This means that memory is pooled in a single location for easy allocation and removal. What this gives us is the removal of explicit memory deallocation. Because all memory allocated by the compiler is registered with the arena, a single call to free the arena is all that is needed to completely free all memory used by the compiler. In general, unless you are working on the critical core of the compiler, memory management can be completely ignored. But if you are working at either the very beginning of the compiler or the end, you need to care about how the arena works. All code relating to the arena is in either Include/pyarena.h or Python/pyarena.c. PyArena_New() will create a new arena. The returned PyArena structure will store pointers to all memory given to it. This does the bookkeeping of what memory needs to be freed when the compiler is finished with the memory it used. That freeing is done with PyArena_Free(). This only needs to be called in strategic areas where the compiler exits. As stated above, in general you should not have to worry about memory management when working on the compiler. The technical details have been designed to be hidden from you for most cases. The only exception comes about when managing a PyObject.
Since the rest of Python uses reference counting, there is extra support added to the arena to clean up each PyObject that was allocated. These cases are very rare. However, if you've allocated a PyObject, you must tell the arena about it by calling PyArena_AddPyObject(). Parse Tree to AST The AST is generated from the parse tree (see Python/ast.c) using the function PyAST_FromNode(). The function begins a tree walk of the parse tree, creating various AST nodes as it goes along. It does this by allocating all new nodes it needs, calling the proper AST node creation functions for any required supporting functions, and connecting them as needed. Be aware that there is no automated or symbolic connection between the grammar specification and the nodes in the parse tree. No help is directly provided by the parse tree as in yacc. For instance, one must keep track of which node in the parse tree one is working with (e.g., if you are working with an 'if' statement you need to watch out for the ':' token to find the end of the conditional). The functions called to generate AST nodes from the parse tree all have the name ast_for_xx, where xx is the grammar rule that the function handles (alias_for_import_name is the exception to this). These in turn call the constructor functions as defined by the ASDL grammar and contained in Python/Python-ast.c (which was generated by Parser/asdl_c.py) to create the nodes of the AST. This all leads to a sequence of AST nodes stored in asdl_seq structs. Functions and macros for creating and using asdl_seq * types, as found in Python/asdl.c and Include/asdl.h, include: - asdl_seq_new() Allocate memory for an asdl_seq for the specified length - asdl_seq_GET() Get item held at a specific position in an asdl_seq - asdl_seq_SET() Set a specific index in an asdl_seq to the specified value - asdl_seq_LEN(asdl_seq *) Return the length of an asdl_seq If you are working with statements, you must also worry about keeping track of what line number generated the statement. Currently the line number is passed as the last parameter to each stmt_ty function. Control Flow Graphs A control flow graph (often referenced by its acronym, CFG) is a directed graph that models the flow of a program using basic blocks that contain the intermediate representation (abbreviated "IR"; in this case, Python bytecode) within the blocks. Basic blocks themselves are blocks of IR that have a single entry point but possibly multiple exit points. The single entry point is the key to basic blocks; it all has to do with jumps. An entry point is the target of something that changes control flow (such as a function call or a jump), while exit points are instructions that would change the flow of the program (such as jumps and 'return' statements). What this means is that a basic block is a chunk of code that starts at the entry point and runs to an exit point or the end of the block. As an example, consider an 'if' statement with an 'else' block. The guard on the 'if' is a basic block which is pointed to by the basic block containing the code leading to the 'if' statement. The 'if' statement block contains jumps (which are exit points) to the true body of the 'if' and the 'else' body (which may be NULL), each of which is its own basic block. Both of those blocks in turn point to the basic block representing the code following the entire 'if' statement. CFGs are usually one step away from final code output.
Code is directly generated from the basic blocks (with jump targets adjusted based on the output order) by doing a post-order depth-first search on the CFG following the edges. AST to CFG to Bytecode With the AST created, the next step is to create the CFG. The first step is to convert the AST to Python bytecode without having jump targets resolved to specific offsets (these are calculated when the CFG goes to final bytecode). Essentially, this transforms the AST into Python bytecode with control flow represented by the edges of the CFG. Conversion is done in two passes. The first creates the namespace (variables can be classified as local, free/cell for closures, or global). With that done, the second pass essentially flattens the CFG into a list and calculates jump offsets for final output of bytecode. The conversion process is initiated by a call to the function PyAST_Compile() in Python/compile.c. This function does both the conversion of the AST to a CFG and the outputting of final bytecode from the CFG. The AST to CFG step is handled mostly by two functions called by PyAST_Compile(): PySymtable_Build() and compiler_mod(). The former is in Python/symtable.c while the latter is in Python/compile.c. PySymtable_Build() begins by entering the starting code block for the AST (passed-in) and then calling the proper symtable_visit_xx function (with xx being the AST node type). Next, the AST is walked, with the various code blocks that delineate the reach of a local variable entered and exited using symtable_enter_block() and symtable_exit_block(), respectively. Once the symbol table is created, it is time for CFG creation, whose code is in Python/compile.c. This is handled by several functions that break the task down by various AST node types. The functions are all named compiler_visit_xx, where xx is the name of the node type (such as stmt, expr, etc.). Each function receives a struct compiler * and xx_ty where xx is the AST node type. Typically these functions consist of a large 'switch' statement, branching based on the kind of node type passed to it. Simple things are handled inline in the 'switch' statement, with more complex transformations farmed out to other functions named compiler_xx, with xx being a descriptive name of what is being handled. When transforming an arbitrary AST node, use the VISIT() macro. The appropriate compiler_visit_xx function is called, based on the value passed in for <node type> (so VISIT(c, expr, node) calls compiler_visit_expr(c, node)). The VISIT_SEQ macro is very similar, but is called on AST node sequences (those values that were created as arguments to a node that used the '*' modifier). There is also VISIT_SLICE() just for handling slices.
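The block structure described above can be observed from Python with the dis module: each conditional jump in the disassembly is an exit point that splits the code into basic blocks. A minimal illustration (exact opcode names vary by Python version):

    import dis

    def f(x):
        if x:             # guard block: ends in a conditional jump (exit point)
            return 1      # true-body block
        return 2          # fall-through block for the implicit 'else'

    dis.dis(f)  # look for a POP_JUMP_IF_FALSE-style opcode marking the split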
Emission of bytecode is handled by the following macros: - ADDOP() add a specified opcode - ADDOP_I() add an opcode that takes an argument - ADDOP_O(struct compiler *c, int op, PyObject *type, PyObject *obj) add an opcode with the proper argument based on the position of the specified PyObject in a PyObject sequence object, but with no handling of mangled names; used for when you need to do named lookups of objects such as globals, consts, or parameters where name mangling is not possible and the scope of the name is known - ADDOP_NAME() just like ADDOP_O, but name mangling is also handled; used for attribute loading or importing based on name - ADDOP_JABS() create an absolute jump to a basic block - ADDOP_JREL() create a relative jump to a basic block There are several helper functions that emit bytecode; they are named compiler_xx(), where xx is what the function helps with (list, boolop, etc.). A rather useful one is compiler_nameop(). This function looks up the scope of a variable and, based on the expression context, emits the proper opcode to load, store, or delete the variable. The line number on which a statement is defined is handled by compiler_visit_stmt(), and thus is not a worry. In addition to emitting bytecode based on the AST node, the creation of basic blocks must be handled. Below are the macros and functions used for managing basic blocks: - NEW_BLOCK() create a block and set it as current - NEXT_BLOCK() basically NEW_BLOCK() plus a jump from the current block - compiler_new_block() create a block but don't use it (used for generating jumps) Once the CFG is created, it must be flattened and then final emission of bytecode occurs. Flattening is handled using a post-order depth-first search. Once flattened, jump offsets are backpatched based on the flattening and then a PyCodeObject is created. All of this is handled by calling assemble(). Introducing New Bytecode Sometimes a new feature requires a new opcode. But adding new bytecode is not as simple as just suddenly introducing new bytecode in the AST -> bytecode step of the compiler. Several pieces of code throughout Python depend on having correct information about what bytecode exists. First, you must choose a name and a unique identifier number. The official list of bytecode can be found in Include/opcode.h. If the opcode is to take an argument, it must be given a unique number greater than that assigned to HAVE_ARGUMENT (as found in Include/opcode.h). Once the name/number pair has been chosen and entered in Include/opcode.h, you must also enter it into Lib/opcode.py and Doc/library/dis.rst. With a new bytecode you must also change what is called the magic number for .pyc files. The variable MAGIC in Python/import.c contains the number. Changing this number will cause all .pyc files with the old MAGIC to be recompiled by the interpreter on import. Finally, you need to introduce the use of the new bytecode. Altering Python/compile.c and Python/ceval.c will be the primary places to change. But you will also need to change the 'compiler' package. The key files to do that are Lib/compiler/pyassem.py and Lib/compiler/pycodegen.py. If you make a change here that can affect the output of bytecode that is already in existence, and you do not change the magic number constantly, make sure to delete your old .py(c|o) files!
Even though you will end up changing the magic number if you change the bytecode, while you are debugging your work you will be changing the bytecode output without constantly bumping up the magic number. This means you end up with stale .pyc files that will not be recreated. Running find . -name '*.py[co]' -exec rm -f {} ';' should delete all .pyc files you have, forcing new ones to be created and thus allowing you to test out your new bytecode properly. Code Objects The result of PyAST_Compile() is a PyCodeObject, which is defined in Include/code.h. And with that you now have executable Python bytecode! Code objects (bytecode) are executed in Python/ceval.c. This file will also need a new case statement for the new opcode in the big switch statement in PyEval_EvalFrameEx(). Important Files - Parser/ - Python.asdl ASDL syntax file - asdl.py "An implementation of the Zephyr Abstract Syntax Definition Language." Uses SPARK to parse the ASDL files. - asdl_c.py "Generate C code from an ASDL description." Generates Python/Python-ast.c and Include/Python-ast.h. - spark.py SPARK parser generator - Python/ - Python-ast.c Creates C structs corresponding to the ASDL types. Also contains code for marshaling AST nodes (core ASDL types have marshaling code in asdl.c). "File automatically generated by Parser/asdl_c.py". This file must be committed separately after every grammar change is committed since the __version__ value is set to the latest grammar change revision number. - asdl.c Contains code to handle the ASDL sequence type. Also has code to handle marshalling the core ASDL types, such as number and identifier. Used by Python-ast.c for marshaling AST nodes. - ast.c Converts Python's parse tree into the abstract syntax tree. - ceval.c Executes bytecode (aka, the eval loop). - compile.c Emits bytecode based on the AST. - symtable.c Generates a symbol table from the AST. - pyarena.c Implementation of the arena memory manager. - import.c Home of the magic number (named MAGIC) for bytecode versioning - Include/ - Python-ast.h Contains the actual definitions of the C structs as generated by Python/Python-ast.c. "Automatically generated by Parser/asdl_c.py". - asdl.h Header for the corresponding Python/asdl.c. - ast.h Declares PyAST_FromNode() external (from Python/ast.c). - code.h Header file for Objects/codeobject.c; contains definition of PyCodeObject. - symtable.h Header for Python/symtable.c. struct symtable and PySTEntryObject are defined here. - pyarena.h Header file for the corresponding Python/pyarena.c. - opcode.h Master list of bytecode; if this file is modified you must modify several other files accordingly (see "Introducing New Bytecode") - Objects/ - codeobject.c Contains PyCodeObject-related code (originally in Python/compile.c). - Lib/ - opcode.py One of the files that must be modified if Include/opcode.h is. - compiler/ - pyassem.py One of the files that must be modified if Include/opcode.h is changed. - pycodegen.py One of the files that must be modified if Include/opcode.h is changed. Known Compiler-related Experiments This section lists known experiments involving the compiler (including bytecode). Skip Montanaro presented a paper at a Python workshop on a peephole optimizer [1]. Michael Hudson has a non-active SourceForge project named Bytecodehacks [2] that provides functionality for playing with bytecode directly. An opcode to combine the functionality of LOAD_ATTR/CALL_FUNCTION was created, named CALL_ATTR [3].
It currently only works for classic classes; for new-style classes, rough benchmarking showed an actual slowdown, thanks to having to support both classic and new-style classes. References Aho86 Alfred V. Aho, Ravi Sethi, Jeffrey D. Ullman. Compilers: Principles, Techniques, and Tools, http://www.amazon.com/exec/obidos/tg/detail/-/0201100886/104-0162389-6419108 Wang97 Daniel C. Wang, Andrew W. Appel, Jeff L. Korn, and Chris S. Serra. The Zephyr Abstract Syntax Description Language. In Proceedings of the Conference on Domain-Specific Languages, pp. 213--227, 1997. [1] Skip Montanaro's Peephole Optimizer Paper (https://legacy.python.org/workshops/1998-11/proceedings/papers/montanaro/montanaro.html) [2] Bytecodehacks Project (http://bytecodehacks.sourceforge.net/bch-docs/bch/index.html) [3] CALL_ATTR opcode (https://bugs.python.org/issue709744)
python-peps
2024-10-18T13:23:32.585418
2005-02-02T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0339/", "authors": [ "Brett Cannon" ], "pep_number": "0339", "pandoc_version": "3.5" }
0381
PEP: 381 Title: Mirroring infrastructure for PyPI Author: Tarek Ziadé <[email protected]>, Martin von Löwis <[email protected]> Status: Withdrawn Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 21-Mar-2009 Post-History: Abstract This PEP describes a mirroring infrastructure for PyPI. PEP Withdrawal The main PyPI web service was moved behind the Fastly caching CDN in May 2013: https://mail.python.org/pipermail/distutils-sig/2013-May/020848.html Subsequently, this arrangement was formalised as an in-kind sponsorship with the PSF, and the PSF has also taken on the task of risk management in the event that that sponsorship arrangement were to ever cease. The download statistics that were previously provided directly on PyPI are now published indirectly via Google BigQuery: https://packaging.python.org/guides/analyzing-pypi-package-downloads/ Accordingly, the mirroring proposal described in this PEP is no longer required, and has been marked as Withdrawn. Rationale PyPI is hosting over 6000 projects and is used on a daily basis by people to build applications. Especially systems like easy_install and zc.buildout make intensive use of PyPI. For people making intensive use of PyPI, it can act as a single point of failure. People have started to set up some mirrors, both private and public. Those mirrors are active mirrors, which means that they are browsing PyPI to get synced. In order to make the system more reliable, this PEP describes: - the mirror listing and registering at PyPI - the pages a public mirror should maintain. These pages will be used by PyPI, in order to get hit counts and the last modified date. - how a mirror should synchronize with PyPI - how a client can implement a fail-over mechanism Mirror listing and registering People who want to mirror PyPI make a proposal on catalog-SIG. When a mirror is proposed on the mailing list, it is manually added to a mirror list in the PyPI application after it has been checked to be compliant with the mirroring rules. The mirror list is provided as a list of host names of the form X.pypi.python.org. The values of X are the sequence a,b,c,...,aa,ab,... a.pypi.python.org is the master server; the mirrors start with b. A CNAME record last.pypi.python.org points to the last host name. Mirror operators should use a static address, and report planned changes to that address in advance to distutils-sig. The new mirror also appears at http://pypi.python.org/mirrors, which is a human-readable page that gives the list of mirrors. This page also explains how to register a new mirror. Statistics page PyPI provides statistics on downloads at /stats. This page is calculated daily by PyPI, by reading all mirrors' local stats and summing them. The stats are presented in daily or monthly files, under /stats/days and /stats/months. Each file is a bzip2 file with these formats: - YYYY-MM-DD.bz2 for daily files - YYYY-MM.bz2 for monthly files Examples: - /stats/days/2008-11-06.bz2 - /stats/days/2008-11-07.bz2 - /stats/days/2008-11-08.bz2 - /stats/months/2008-11.bz2 - /stats/months/2008-10.bz2 Mirror Authenticity With a distributed mirroring system, clients may want to verify that the mirrored copies are authentic. There are multiple threats to consider: 1. the central index may get compromised 2. the central index is assumed to be trusted, but the mirrors might be tampered with 3. a man in the middle between the central index and the end user, or between a mirror and the end user, might tamper with datagrams
This specification only deals with the second threat. Some provisions are made to detect man-in-the-middle attacks. To detect the first attack, package authors need to sign their packages using PGP keys, so that users can verify that the package comes from the author they trust. The central index provides a DSA key at the URL /serverkey, in the PEM format as generated by "openssl dsa -pubout" (i.e. an RFC 3280 SubjectPublicKeyInfo, with the algorithm 1.3.14.3.2.12). This URL must not be mirrored, and clients must fetch the official serverkey from PyPI directly, or use the copy that came with the PyPI client software. Mirrors should still download the key, to detect a key rollover. For each package, a mirrored signature is provided at /serversig/<package>. This is the DSA signature of the parallel URL /simple/<package>, in DER form, using SHA-1 with DSA (i.e. as an RFC 3279 Dsa-Sig-Value, created by algorithm 1.2.840.10040.4.3). Clients using a mirror need to perform the following steps to verify a package: 1. download the /simple page, and compute its SHA-1 hash 2. compute the DSA signature of that hash 3. download the corresponding /serversig, and compare it (byte-for-byte) with the value computed in step 2 4. compute and verify (against the /simple page) the MD5 hashes of all files they download from the mirror. An implementation of the verification algorithm is available from https://svn.python.org/packages/trunk/pypi/tools/verify.py Verification is not needed when downloading from the central index, and should be avoided to reduce the computation overhead. About once a year, the key will be replaced with a new one. Mirrors will have to re-fetch all /serversig pages. Clients using mirrors need to find a trusted copy of the new server key. One way to obtain one is to download it from https://pypi.python.org/serverkey. To detect man-in-the-middle attacks, clients need to verify the SSL server certificate, which will be signed by the CACert authority. Special pages a mirror needs to provide A mirror is a subset copy of PyPI, so it provides the same structure by copying it. - simple: rest version of the package index - packages: packages, stored by Python version, and letters - serversig: signatures for the simple pages It also needs to provide two specific elements: - last-modified - local-stats Last modified date CPAN uses a freshness date system where the mirror's last synchronisation date is made available. For PyPI, each mirror needs to maintain a URL with simple text content that represents the last synchronisation date the mirror maintains. The date is provided in GMT time, using the ISO 8601 format[1]. Each mirror will be responsible for maintaining its last modified date. This page must be located at /last-modified and must be a text/plain page. Local statistics Each mirror is responsible for counting all the downloads that were done via it. This is used by PyPI to sum up all downloads, to be able to display the grand total. These statistics are in CSV-like form, with a header in the first line. It needs to obey PEP 305. Basically, it should be readable by Python's csv module. The fields in this file are: - package: the distutils id of the package. - filename: the filename that has been downloaded. - useragent: the User-Agent of the client that has downloaded the package. - count: the number of downloads. The content will look like this: # package,filename,useragent,count zc.buildout,zc.buildout-1.6.0.tgz,MyAgent,142 ...
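For illustration only, a daily file in the format above could be consumed with nothing but the standard library; this sketch assumes the example file name from the spec and uses bz2.open's text mode, a Python 3 convenience:

    import bz2
    import csv
    from collections import Counter

    totals = Counter()
    with bz2.open("2008-11-06.bz2", "rt", encoding="utf-8") as f:
        for row in csv.reader(f):
            if not row or row[0].startswith("#"):
                continue  # skip the '# package,filename,useragent,count' header
            package, filename, useragent, count = row
            totals[package] += int(count)

    print(totals.most_common(3))  # top packages for the day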
The counting starts the day the mirror is launched, and there is one file per day, compressed using the bzip2 format. Each file is named like the day. For example, 2008-11-06.bz2 is the file for the 6th of November 2008. They are then provided in a folder called days. For example: - /local-stats/days/2008-11-06.bz2 - /local-stats/days/2008-11-07.bz2 - /local-stats/days/2008-11-08.bz2 This page must be located at /local-stats. How a mirror should synchronize with PyPI A mirroring protocol called Simple Index was described and implemented by Martin v. Loewis and Jim Fulton, based on how easy_install works. This section synthesizes it and gives a few relevant links, plus a small part about the User-Agent. The mirroring protocol Mirrors must reduce the amount of data transferred between the central server and the mirror. To achieve that, they MUST use the changelog() PyPI XML-RPC call, and only refetch the packages that have been changed since the last time. For each package P, they MUST copy the documents /simple/P/ and /serversig/P. If a package is deleted on the central server, they MUST delete the package and all associated files. To detect modification of package files, they MAY cache the file's ETag, and MAY request skipping it using the If-None-Match header. Each mirroring tool MUST identify itself using a descriptive User-Agent header. The pep381client package[2] provides an application that respects this protocol to browse PyPI. User-agent request header In order to be able to differentiate actions taken by clients over PyPI, a specific user agent name should be provided by all mirroring software. This is also true for all clients like: - zc.buildout[3]. - setuptools[4]. - pip[5]. XXX user agent registering mechanism at PyPI ? How a client can use PyPI and its mirrors Clients that are browsing PyPI should be able to use alternative mirrors, by getting the list of the mirrors using last.pypi.python.org. Code example: >>> import socket >>> socket.gethostbyname_ex('last.pypi.python.org')[0] 'h.pypi.python.org' The clients so far that could use this mechanism: - setuptools - zc.buildout (through setuptools) - pip Fail-over mechanism Clients that are browsing PyPI should be able to use a fail-over mechanism when PyPI or the used mirror is not responding. It is up to the client to decide which mirror should be used, maybe by looking at its geographical location and its responsiveness. This PEP does not describe how this fail-over mechanism should work, but it is strongly encouraged that the clients try to use the nearest mirror. The clients so far that could use this mechanism: - setuptools - zc.buildout (through setuptools) - pip Extra package indexes It is obvious that some packages will not be uploaded to PyPI, whether because they are private or because the project maintainer runs their own server where people might get the project package. However, it is strongly encouraged that a public package index follow PyPI and Distutils protocols. In other words, the register and upload commands should be compatible with any package index server out there. Software that is compatible with PyPI and Distutils so far: - PloneSoftwareCenter[6] which is used to run the plone.org products section. - EggBasket[7]. An extra package index is not a mirror of PyPI, but can have some mirrors itself. Merging several indexes When a client needs to get some packages from several distinct indexes, it should be able to use each one of them as a potential source of packages.
Different indexes should be defined as a sorted list for the client to look for a package. Each independent index can of course provide a list of its mirrors. XXX define how to get the hostname for the mirrors of an arbitrary index. That permits all combinations at the client level, for a reliable packaging system with all levels of privacy. It is up to the client to deal with the merging. References Acknowledgments Georg Brandl. Copyright This document has been placed in the public domain. [1] http://en.wikipedia.org/wiki/ISO_8601 [2] http://pypi.python.org/pypi/pep381client [3] http://pypi.python.org/pypi/zc.buildout [4] http://pypi.python.org/pypi/setuptools [5] http://pypi.python.org/pypi/pip [6] http://plone.org/products/plonesoftwarecenter [7] http://www.chrisarndt.de/projects/eggbasket
python-peps
2024-10-18T13:23:32.608095
2009-03-21T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0381/", "authors": [ "Martin von Löwis", "Tarek Ziadé" ], "pep_number": "0381", "pandoc_version": "3.5" }
0698
PEP: 698 Title: Override Decorator for Static Typing Author: Steven Troxler <[email protected]>, Joshua Xu <[email protected]>, Shannon Zhu <[email protected]> Sponsor: Jelle Zijlstra <jelle.zijlstra at gmail.com> Discussions-To: https://discuss.python.org/t/pep-698-a-typing-override-decorator/20839 Status: Final Type: Standards Track Topic: Typing Created: 05-Sep-2022 Python-Version: 3.12 Post-History: 20-May-2022, 17-Aug-2022, 11-Oct-2022, 07-Nov-2022 Resolution: https://discuss.python.org/t/pep-698-a-typing-override-decorator/20839/11 See the typing specification's entry on override and the @typing.override documentation for the canonical, up-to-date description. Abstract This PEP proposes adding an @override decorator to the Python type system. This will allow type checkers to prevent a class of bugs that occur when a base class changes methods that are inherited by derived classes. Motivation A primary purpose of type checkers is to flag when refactors or changes break pre-existing semantic structures in the code, so users can identify and make fixes across their project without doing a manual audit of their code. Safe Refactoring Python’s type system does not provide a way to identify call sites that need to be changed to stay consistent when an overridden function API changes. This makes refactoring and transforming code more dangerous. Consider this simple inheritance structure: class Parent: def foo(self, x: int) -> int: return x class Child(Parent): def foo(self, x: int) -> int: return x + 1 def parent_callsite(parent: Parent) -> None: parent.foo(1) def child_callsite(child: Child) -> None: child.foo(1) If the overridden method on the superclass is renamed or deleted, type checkers will only alert us to update call sites that deal with the base type directly. But the type checker can only see the new code, not the change we made, so it has no way of knowing that we probably also needed to rename the same method on child classes. A type checker will happily accept this code, even though we are likely introducing bugs: class Parent: # Rename this method def new_foo(self, x: int) -> int: return x class Child(Parent): # This (unchanged) method used to override `foo` but is unrelated to `new_foo` def foo(self, x: int) -> int: return x + 1 def parent_callsite(parent: Parent) -> None: # If we pass a Child instance we’ll now run Parent.new_foo - likely a bug parent.new_foo(1) def child_callsite(child: Child) -> None: # We probably wanted to invoke new_foo here. Instead, we forked the method child.foo(1) This code will type check, but there are two potential sources of bugs: - If we pass a Child instance to the parent_callsite function, it will invoke the implementation in Parent.new_foo rather than Child.foo. This is probably a bug - we presumably would not have written Child.foo in the first place if we didn’t need custom behavior. - Our system was likely relying on Child.foo behaving in a similar way to Parent.foo. But unless we catch this early, we have now forked the methods, and in future refactors it is likely no one will realize that major changes to the behavior of new_foo likely require updating Child.foo as well, which could lead to major bugs later. The incorrectly-refactored code is type-safe, but is probably not what we intended and could cause our system to behave incorrectly. The bug can be difficult to track down because our new code likely does execute without throwing exceptions. Tests are less likely to catch the problem, and silent errors can take longer to track down in production.
We are aware of several production outages in multiple typed codebases caused by such incorrect refactors. This is our primary motivation for adding an @override decorator to the type system, which lets developers express the relationship between Parent.foo and Child.foo so that type checkers can detect the problem. Rationale Subclass Implementations Become More Explicit We believe that explicit overrides will make unfamiliar code easier to read than implicit overrides. A developer reading the implementation of a subclass that uses @override can immediately see which methods are overriding functionality in some base class; without this decorator, the only way to quickly find out is using a static analysis tool. Precedent in Other Languages and Runtime Libraries Static Override Checks in Other Languages Many popular programming languages support override checks. For example: - C++ has override. - C# has override. - Hack has <<__Override>>. - Java has @Override. - Kotlin has override. - Scala has override. - Swift has override. - TypeScript has override. Runtime Override Checks in Python Today, there is an Overrides library that provides decorators @overrides [sic] and @final and will enforce them at runtime. PEP 591 added a @final decorator with the same semantics as those in the Overrides library. But the override component of the runtime library is not supported statically at all, which has added some confusion around the mix/matched support. Providing support for @override in static checks would add value because: - Bugs can be caught earlier, often in-editor. - Static checks come with no performance overhead, unlike runtime checks. - Bugs will be caught quickly even in rarely-used modules, whereas with runtime checks these might go undetected for a time without automated tests of all imports. Disadvantages Using @override will make code more verbose. Specification When type checkers encounter a method decorated with @typing.override they should treat it as a type error unless that method is overriding a compatible method or attribute in some ancestor class. from typing import override class Parent: def foo(self) -> int: return 1 def bar(self, x: str) -> str: return x class Child(Parent): @override def foo(self) -> int: return 2 @override def baz(self) -> int: # Type check error: no matching signature in ancestor return 1 The @override decorator should be permitted anywhere a type checker considers a method to be a valid override, which typically includes not only normal methods but also @property, @staticmethod, and @classmethod. No New Rules for Override Compatibility This PEP is exclusively concerned with the handling of the new @override decorator, which specifies that the decorated method must override some attribute in an ancestor class. This PEP does not propose any new rules regarding the type signatures of such methods. Strict Enforcement Per-Project We believe that @override is most useful if checkers also allow developers to opt into a strict mode where methods that override a parent class are required to use the decorator. Strict enforcement should be opt-in for backward compatibility. Motivation The primary reason for a strict mode that requires @override is that developers can only trust that refactors are override-safe if they know that the @override decorator is used throughout the project. There is another class of bug related to overrides that we can only catch using a strict mode. 
Consider the following code: class Parent: pass class Child(Parent): def foo(self) -> int: return 2 Imagine we refactor it as follows: class Parent: def foo(self) -> int: # This method is new return 1 class Child(Parent): def foo(self) -> int: # This is now an override! return 2 def call_foo(parent: Parent) -> int: return parent.foo() # This could invoke Child.foo, which may be surprising. The semantics of our code changed here, which could cause two problems: - If the author of the code change did not know that Child.foo already existed (which is very possible in a large codebase), they might be surprised to see that call_foo does not always invoke Parent.foo. - If the codebase authors tried to manually apply @override everywhere when writing overrides in subclasses, they are likely to miss the fact that Child.foo needs it here. At first glance this kind of change may seem unlikely, but it can actually happen often if one or more subclasses have functionality that developers later realize belongs in the base class. With a strict mode, we will always alert developers when this occurs. Precedent Most of the typed, object-oriented programming languages we looked at have an easy way to require explicit overrides throughout a project: - C#, Kotlin, Scala, and Swift always require explicit overrides - TypeScript has a --no-implicit-override flag to force explicit overrides - In Hack and Java the type checker always treats overrides as opt-in, but widely-used linters can warn if explicit overrides are missing. Backward Compatibility By default, the @override decorator will be opt-in. Codebases that do not use it will type-check as before, without the additional type safety. Runtime Behavior Set __override__ = True when possible At runtime, @typing.override will make a best-effort attempt to add an attribute __override__ with value True to its argument. By "best-effort" we mean that we will try adding the attribute, but if that fails (for example because the input is a descriptor type with fixed slots) we will silently return the argument as-is. This is exactly what the @typing.final decorator does, and the motivation is similar: it gives runtime libraries the ability to use @override. As a concrete example, a runtime library could check __override__ in order to automatically populate the __doc__ attribute of child class methods using the parent method docstring. Limitations of setting __override__ As described above, adding __override__ may fail at runtime, in which case we will simply return the argument as-is. In addition, even in cases where it does work, it may be difficult for users to correctly work with multiple decorators, because successfully ensuring the __override__ attribute is set on the final output requires understanding the implementation of each decorator: - The @override decorator needs to execute after ordinary decorators like @functools.lru_cache that use wrapper functions, since we want to set __override__ on the outermost wrapper. This means it needs to go above all these other decorators. - But @override needs to execute before many special descriptor-based decorators like @property, @staticmethod, and @classmethod. - As discussed above, in some cases (for example a descriptor with fixed slots or a descriptor that also wraps) it may be impossible to set the __override__ attribute at all. As a result, runtime support for setting __override__ is best effort only, and we do not expect type checkers to validate the ordering of decorators. 
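A small sketch tying together the runtime behavior and the ordering constraints just described (requires Python 3.12+ for typing.override; typing_extensions provides a backport for earlier versions):

    from typing import override

    class Parent:
        def foo(self) -> int:
            return 1

        @property
        def bar(self) -> int:
            return 10

    class Child(Parent):
        @override                  # plain method: __override__ is set directly
        def foo(self) -> int:
            return 2

        @property
        @override                  # executes before @property wraps the function
        def bar(self) -> int:
            return 20

    print(Child.foo.__override__)       # True
    print(Child.bar.fget.__override__)  # True (set on the underlying function)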
Rejected Alternatives Rely on Integrated Development Environments for safety Modern Integrated Development Environments (IDEs) often provide the ability to automatically update subclasses when renaming a method. But we view this as insufficient for several reasons: - If a codebase is split into multiple projects, an IDE will not help and the bug appears when upgrading dependencies. Type checkers are a fast way to catch breaking changes in dependencies. - Not all developers use such IDEs. And library maintainers, even if they do use an IDE, should not need to assume pull request authors use the same IDE. We prefer being able to detect problems in continuous integration without assuming anything about developers’ choice of editor. Runtime enforcement We considered having @typing.override enforce override safety at runtime, similarly to how @overrides.overrides does today. We rejected this for four reasons: - For users of static type checking, it is not clear this brings any benefits. - There would be at least some performance overhead, leading to projects importing slower with runtime enforcement. We estimate the @overrides.overrides implementation takes around 100 microseconds, which is fast but could still add up to a second or more of extra initialization time in million-plus line codebases, which is exactly where we think @typing.override will be most useful. - An implementation may have edge cases where it doesn’t work well (we heard from a maintainer of one such closed-source library that this has been a problem). We expect static enforcement to be simple and reliable. - The implementation approaches we know of are not simple. The decorator executes before the class is finished evaluating, so the options we know of are either to inspect the bytecode of the caller (as @overrides.overrides does) or to use a metaclass-based approach. Neither approach seems ideal. Mark a base class to force explicit overrides on subclasses We considered including a class decorator @require_explicit_overrides, which would have provided a way for base classes to declare that all subclasses must use the @override decorator on method overrides. The Overrides library has a mixin class, EnforceExplicitOverrides, which provides similar behavior in runtime checks. We decided against this because we expect owners of large codebases will benefit most from @override, and for these use cases having a strict mode where explicit @override is required (see the Backward Compatibility section) provides more benefits than a way to mark base classes. Moreover we believe that authors of projects who do not consider the extra type safety to be worth the additional boilerplate of using @override should not be forced to do so. Having an optional strict mode puts the decision in the hands of project owners, whereas the use of @require_explicit_overrides in libraries would force project owners to use @override even if they prefer not to. Include the name of the ancestor class being overridden We considered allowing the caller of @override to specify a specific ancestor class where the overridden method should be defined: class Parent0: def foo(self) -> int: return 1 class Parent1: def bar(self) -> int: return 1 class Child(Parent0, Parent1): @override(Parent0) # okay, Parent0 defines foo def foo(self) -> int: return 2 @override(Parent0) # type error, Parent0 does not define bar def bar(self) -> int: return 2 This could be useful for code readability because it makes the override structure more explicit for deep inheritance trees. 
It also might catch bugs by prompting developers to check that the implementation of an override still makes sense whenever a method being overridden moves from one base class to another. We decided against it because: - Supporting this would add complexity to the implementation of both @override and type checker support for it, so there would need to be considerable benefits. - We believe that it would be rarely used and catch relatively few bugs. - The author of the Overrides package has noted that early versions of his library included this capability but it was rarely useful and seemed to have little benefit. After it was removed, the ability was never requested by users. Reference Implementation Pyre: A proof of concept is implemented in Pyre: - The decorator @pyre_extensions.override can mark overrides - Pyre can type-check this decorator as specified in this PEP Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
python-peps
2024-10-18T13:23:32.648486
2022-09-05T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0698/", "authors": [ "Steven Troxler" ], "pep_number": "0698", "pandoc_version": "3.5" }
0439
PEP: 439 Title: Inclusion of implicit pip bootstrap in Python installation Version: $Revision$ Last-Modified: $Date$ Author: Richard Jones <[email protected]> BDFL-Delegate: Alyssa Coghlan <[email protected]> Discussions-To: [email protected] Status: Rejected Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 18-Mar-2013 Python-Version: 3.4 Post-History: 19-Mar-2013 Resolution: https://mail.python.org/pipermail/distutils-sig/2013-August/022527.html Abstract This PEP proposes the inclusion of a pip bootstrap executable in the Python installation to simplify the use of 3rd-party modules by Python users. This PEP does not propose to include the pip implementation in the Python standard library. Nor does it propose to implement any package management or installation mechanisms beyond those provided by PEP 427 ("The Wheel Binary Package Format 1.0") and TODO distlib PEP. PEP Rejection This PEP has been rejected in favour of a more explicit mechanism that should achieve the same end result in a more reliable fashion. The more explicit bootstrapping mechanism is described in PEP 453. Rationale Currently the user story for installing 3rd-party Python modules is not as simple as it could be. It requires that all 3rd-party modules inform the user of how to install the installer, typically via a link to the installer. That link may be out of date or the steps required to perform the install of the installer may be enough of a roadblock to prevent the user from further progress. Large Python projects which emphasise a low barrier to entry have shied away from depending on third party packages because of the introduction of this potential stumbling block for new users. With the inclusion of the package installer command in the standard Python installation the barrier to installing additional software is considerably reduced. It is hoped that this will therefore increase the likelihood that Python projects will reuse third party software. The Python community also has an issue of complexity around the current bootstrap procedure for pip and setuptools. They all have their own bootstrap download file with slightly different usages and even refer to each other in some cases. Having a single bootstrap which is common amongst them all, with a simple usage, would be far preferable. It is also hoped that this reduces the number of proposals to include more and more software in the Python standard library, and therefore that more popular Python software is more easily upgradeable beyond requiring Python installation upgrades. Proposal The bootstrap will install the pip implementation and setuptools by downloading their installation files from PyPI. This proposal affects two components of packaging: the pip bootstrap and, thanks to easier package installation, modifications to publishing packages. The core of this proposal is that the user experience of using pip should not require the user to install pip. The pip bootstrap The Python installation includes an executable called "pip3" (see PEP 394 for naming rationale etc.) that attempts to import pip machinery. If it can then the pip command proceeds as normal. If it cannot it will bootstrap pip by downloading the pip implementation and setuptools wheel files. Hereafter the installation of the "pip implementation" will imply installation of setuptools and virtualenv. Once installed, the pip command proceeds as normal. Once the bootstrap process is complete the "pip3" command is no longer the bootstrap but rather the full pip command.
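The control flow just described can be sketched as follows. This is purely illustrative: the PEP was rejected and the pip3 bootstrap was never shipped, the bootstrap_pip() helper is invented for the sketch, and pip.main() was the command entry point only in pip releases of that era:

    import sys

    def bootstrap_pip() -> None:
        # Hypothetical helper: download the pip and setuptools wheels from
        # PyPI over verified HTTPS and install them to the per-user
        # site-packages directory (or into the active virtual environment).
        raise NotImplementedError

    def main() -> int:
        try:
            import pip  # already bootstrapped: behave as the full pip command
        except ImportError:
            bootstrap_pip()
            import pip
        return pip.main(sys.argv[1:])

    if __name__ == "__main__":
        sys.exit(main())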
A bootstrap is used in place of the full pip code so that we don't have to bundle pip, and so that pip is upgradeable outside of the regular Python upgrade timeframe and processes. To avoid issues with sudo we will have the bootstrap default to installing the pip implementation to the per-user site-packages directory defined in PEP 370 and implemented in Python 2.6/3.0. Since we avoid installing to the system Python we also avoid conflicting with any other packaging system (on Linux systems, for example.) If the user is inside a PEP 405 virtual environment then the pip implementation will be installed into that virtual environment. The bootstrap process will proceed as follows: 1. The user system has Python (3.4+) installed. In the "scripts" directory of the Python installation there is the bootstrap script called "pip3". 2. The user will invoke a pip command, typically "pip3 install <package>", for example "pip3 install Django". 3. The bootstrap script will attempt to import the pip implementation. If this succeeds, the pip command is processed normally. Stop. 4. On failing to import the pip implementation the bootstrap notifies the user that it needs to "install pip". It will ask the user whether it should install pip into the system-wide site-packages or as a user-only package. This choice will also be present as a command-line option to pip so non-interactive use is possible. 5. The bootstrap will then contact PyPI to obtain the latest download wheel file (see PEP 427.) 6. Upon downloading the file it is installed using "python setup.py install". 7. The pip tool may now import the pip implementation and continues to process the requested user command normally. Users may be running in an environment which cannot access the public Internet and are relying solely on a local package repository. They would use the "-i" (Base URL of Python Package Index) argument to the "pip3 install" command. This simply overrides the default index URL pointing to PyPI. Some users may have no Internet access suitable for fetching the pip implementation file. These users can manually download and install the setuptools and pip tar files. Adding specific support for this use-case is unnecessary. The download of the pip implementation install file will be performed securely. The transport from pypi.python.org will be done over HTTPS with the CA certificate check performed. This facility will be present in Python 3.4+ using Operating System certificates (see PEP XXXX). Beyond those arguments controlling index location and download options, the "pip3" bootstrap command may support further standard pip options for verbosity, quietness and logging. The "pip3" command will support two new command-line options that are used in the bootstrapping, and otherwise ignored. They control where the pip implementation is installed: --bootstrap Install to the user's packages directory. The name of this option is chosen to promote it as the preferred installation option. --bootstrap-to-system Install to the system site-packages directory. These command-line options will also need to be implemented, but otherwise ignored, in the pip implementation. Consideration should be given to defaulting pip to install packages to the user's packages directory if pip is installed in that location. The "--no-install" option to the "pip3" command will not affect the bootstrapping process. Modifications to publishing packages An additional new Python package is proposed, "pypublish", which will be a tool for publishing packages to PyPI.
It would replace the current "python setup.py register" and "python setup.py upload" distutils commands. Again because of the measured Python release cycle and extensive existing Python installations these commands are difficult to bugfix and extend. Additionally it is desired that the "register" and "upload" commands be able to be performed over HTTPS with certificate validation. Since shipping CA certificate keychains with Python is not really feasible (updating the keychain is quite difficult to manage) it is desirable that those commands, and the accompanying keychain, be made installable and upgradeable outside of Python itself. The existing distutils mechanisms for package registration and upload would remain, though with a deprecation warning. Implementation The changes to pip required by this PEP are being tracked in that project's issue tracker[1]. Most notably, the addition of --bootstrap and --bootstrap-to-system to the pip command-line. It would be preferable that the pip and setuptools projects distribute a wheel format download. The required code for this implementation is the "pip3" command described above. The additional pypublish can be developed outside of the scope of this PEP's work. Finally, it would be desirable that "pip3" be ported to Python 2.6+ to allow the single command to replace existing pip, setuptools and virtualenv (which would be added to the bootstrap) bootstrap scripts. Having that bootstrap included in a future Python 2.7 release would also be highly desirable. Risks The key that is used to sign the pip implementation download might be compromised and this PEP currently proposes no mechanism for key revocation. There is a Perl package installer also named "pip". It is quite rare and not commonly used. The Fedora variant of Linux has historically named Python's "pip" as "python-pip" and Perl's "pip" as "perl-pip". This policy has been altered[2] so that future and upgraded Fedora installations will use the name "pip" for Python's "pip". Existing (non-upgraded) installations will still have the old name for the Python "pip", though the potential for confusion is now much reduced. References Acknowledgments Alyssa Coghlan for her thoughts on the proposal and dealing with the Red Hat issue. Jannis Leidel and Carl Meyer for their thoughts. Marcus Smith for feedback. Marcela Mašláňová for resolving the Fedora issue. Copyright This document has been placed in the public domain. [1] pip issue tracking work needed for this PEP https://github.com/pypa/pip/issues/863 [2] Fedora's python-pip package does not provide /usr/bin/pip https://bugzilla.redhat.com/show_bug.cgi?id=958377
python-peps
2024-10-18T13:23:32.660844
2013-03-18T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0439/", "authors": [ "Richard Jones" ], "pep_number": "0439", "pandoc_version": "3.5" }
0404
PEP: 404 Title: Python 2.8 Un-release Schedule Author: Barry Warsaw <[email protected]> Status: Final Type: Informational Topic: Release Content-Type: text/x-rst Created: 09-Nov-2011 Python-Version: 2.8 Abstract This document describes the un-development and un-release schedule for Python 2.8. Un-release Manager and Crew Position Name ------------------------ ------------------ 2.8 Un-release Manager Cardinal Biggles Un-release Schedule The current un-schedule is: - 2.8 final Never Official pronouncement Rule number six: there is no official Python 2.8 release. There never will be an official Python 2.8 release. It is an ex-release. Python 2.7 is the end of the Python 2 line of development. Upgrade path The official upgrade path from Python 2.7 is to Python 3. And Now For Something Completely Different In all seriousness, there are important reasons why there won't be an official Python 2.8 release, and why you should plan to migrate instead to Python 3. Python is (as of this writing) more than 20 years old, and Guido and the community have learned a lot in those intervening years. Guido's original concept for Python 3 was to make changes to the language primarily to remove the warts that had grown in the preceding versions. Python 3 was not to be a complete redesign, but instead an evolution of the language, and while maintaining full backward compatibility with Python 2 was explicitly off-the-table, neither were gratuitous changes in syntax or semantics acceptable. In most cases, Python 2 code can be translated fairly easily to Python 3, sometimes entirely mechanically by such tools as 2to3 (there's also a non-trivial subset of the language that will run without modification on both 2.7 and 3.x). Because maintaining multiple versions of Python is a significant drag on the resources of the Python developers, and because the improvements to the language and libraries embodied in Python 3 are so important, it was decided to end the Python 2 lineage with Python 2.7. Thus, all new development occurs in the Python 3 line of development, and there will never be an official Python 2.8 release. Python 2.7 will however be maintained for longer than the usual period of time. Here are some highlights of the significant improvements in Python 3. You can read in more detail on the differences between Python 2 and Python 3. There are also many good guides on porting from Python 2 to Python 3. Strings and bytes Python 2's basic original strings are called 8-bit strings, and they play a dual role in Python 2 as both ASCII text and as byte sequences. While Python 2 also has a unicode string type, the fundamental ambiguity of the core string type, coupled with Python 2's default behavior of supporting automatic coercion from 8-bit strings to unicode objects when the two are combined, often leads to UnicodeErrors. Python 3's standard string type is Unicode based, and Python 3 adds a dedicated bytes type, but critically, no automatic coercion between bytes and unicode strings is provided. The closest the language gets to implicit coercion are a few text-based APIs that assume a default encoding (usually UTF-8) if no encoding is explicitly stated. Thus, the core interpreter, its I/O libraries, module names, etc. are clear in their distinction between unicode strings and bytes. Python 3's unicode support even extends to the filesystem, so that non-ASCII file names are natively supported. 
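For example, where Python 2 silently coerces 8-bit strings to unicode, Python 3 fails loudly and requires an explicit decode (an illustrative session; the exact TypeError wording varies across 3.x releases):

    >>> "café" + b" au lait"
    Traceback (most recent call last):
      ...
    TypeError: can only concatenate str (not "bytes") to str
    >>> "café" + b" au lait".decode("utf-8")
    'café au lait'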
This string/bytes clarity is often a source of difficulty in transitioning existing code to Python 3, because many third party libraries and applications are themselves ambiguous in this distinction. Once migrated though, most UnicodeErrors can be eliminated.

Numbers

Python 2 has two basic integer types, a native machine-sized int type, and an arbitrary length long type. These have been merged in Python 3 into a single int type analogous to Python 2's long type. In addition, integer division now produces floating point numbers for non-integer results.

Classes

Python 2 has two core class hierarchies, often called classic classes and new-style classes. The latter allow for such things as inheriting from the builtin basic types, support descriptor based tools like the property builtin and provide a generally more sane and coherent system for dealing with multiple inheritance. Python 3 provided the opportunity to completely drop support for classic classes, so all classes in Python 3 automatically use the new-style semantics (although that's a misnomer now). There is no need to explicitly inherit from object or set the default metatype to enable them (in fact, setting a default metatype at the module level is no longer supported - the default metatype is always object). The mechanism for explicitly specifying a metaclass has also changed to use a metaclass keyword argument in the class header line rather than a __metaclass__ magic attribute in the class body.

Multiple spellings

There are many cases in Python 2 where multiple spellings of some constructs exist, such as repr() and backticks, or the two inequality operators != and <>. In all cases, Python 3 has chosen exactly one spelling and removed the other (e.g. repr() and != were kept).

Imports

In Python 3, implicit relative imports within packages are no longer available - only absolute imports and explicit relative imports are supported. In addition, star imports (e.g. from x import *) are only permitted in module level code. Also, some areas of the standard library have been reorganized to make the naming scheme more intuitive. Some rarely used builtins have been relocated to standard library modules.

Iterators and views

Many APIs, which in Python 2 returned concrete lists, in Python 3 now return iterators or lightweight views.

Copyright

This document has been placed in the public domain.
python-peps
2024-10-18T13:23:32.669940
2011-11-09T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0404/", "authors": [ "Barry Warsaw" ], "pep_number": "0404", "pandoc_version": "3.5" }
0554
PEP: 554
Title: Multiple Interpreters in the Stdlib
Author: Eric Snow <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-554-multiple-interpreters-in-the-stdlib/24855
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Sep-2017
Python-Version: 3.13
Post-History: 07-Sep-2017, 08-Sep-2017, 13-Sep-2017, 05-Dec-2017, 04-May-2020, 14-Mar-2023, 01-Nov-2023
Superseded-By: 734

Note

This PEP effectively continues in a cleaner form in PEP 734. This PEP is kept as-is for the sake of the various sections of background information and deferred/rejected ideas that have been stripped from PEP 734.

Abstract

CPython has supported multiple interpreters in the same process (AKA "subinterpreters") since version 1.5 (1997). The feature has been available via the C-API. [c-api] Multiple interpreters operate in relative isolation from one another, which facilitates novel alternative approaches to concurrency.

This proposal introduces the stdlib interpreters module. It exposes the basic functionality of multiple interpreters already provided by the C-API, along with basic support for communicating between interpreters. This module is especially relevant since PEP 684 introduced a per-interpreter GIL in Python 3.12.

Proposal

Summary:

- add a new stdlib module: "interpreters"
- add concurrent.futures.InterpreterPoolExecutor
- help for extension module maintainers

The "interpreters" Module

The interpreters module will provide a high-level interface to the multiple interpreter functionality, and wrap a new low-level _interpreters (in the same way as the threading module). See the Examples section for concrete usage and use cases.

Along with exposing the existing (in CPython) multiple interpreter support, the module will also support a basic mechanism for passing data between interpreters. That involves setting "shareable" objects in the __main__ module of a target subinterpreter. Some such objects, like os.pipe(), may be used to communicate further. The module will also provide a minimal implementation of "channels" as a demonstration of cross-interpreter communication.

Note that objects are not shared between interpreters since they are tied to the interpreter in which they were created. Instead, the objects' data is passed between interpreters. See the Shared Data and API For Communication sections for more details about sharing/communicating between interpreters.

API summary for interpreters module

Here is a summary of the API for the interpreters module. For a more in-depth explanation of the proposed classes and functions, see the "interpreters" Module API section below.

For creating and using interpreters:

    signature                       description
    ------------------------------  -------------------------------------------
    list_all() -> [Interpreter]     Get all existing interpreters.
    get_current() -> Interpreter    Get the currently running interpreter.
    get_main() -> Interpreter       Get the main interpreter.
    create() -> Interpreter         Initialize a new (idle) Python interpreter.

    signature                   description
    --------------------------  -------------------------------------------
    class Interpreter           A single interpreter.
    .id                         The interpreter's ID (read-only).
    .is_running() -> bool       Is the interpreter currently executing code?
    .close()                    Finalize and destroy the interpreter.
    .set_main_attrs(**kwargs)   Bind "shareable" objects in __main__.
    .get_main_attr(name)        Get a "shareable" object from __main__.
    .exec(src_str, /)           Run the given source code in the interpreter
                                (in the current thread).

For communicating between interpreters:

    signature                                        description
    -----------------------------------------------  --------------------------
    is_shareable(obj) -> bool                        Can the object's data be
                                                     passed between
                                                     interpreters?
    create_channel() -> (RecvChannel, SendChannel)   Create a new channel for
                                                     passing data between
                                                     interpreters.

concurrent.futures.InterpreterPoolExecutor

An executor will be added that extends ThreadPoolExecutor to run per-thread tasks in subinterpreters. Initially, the only supported tasks will be whatever Interpreter.exec() takes (e.g. a str script). However, we may also support some functions, as well as eventually a separate method for pickling the task and arguments, to reduce friction (at the expense of performance for short-running tasks). A rough usage sketch follows.
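The following is only an editorial sketch of how the proposed executor might be used; the exact submit() form is an assumption here, since the summary above specifies only that tasks are initially whatever Interpreter.exec() takes (a source string):

    from concurrent.futures import InterpreterPoolExecutor  # proposed addition

    # Hypothetical usage: each worker thread runs its task in its own
    # subinterpreter.
    with InterpreterPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit('print("hello from a subinterpreter")')
                   for _ in range(4)]
        for f in futures:
            f.result()  # like Interpreter.exec(), a str task produces no value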
Help for Extension Module Maintainers

In practice, an extension that implements multi-phase init (PEP 489) is considered isolated and thus compatible with multiple interpreters. Otherwise it is "incompatible". Many extension modules are still incompatible. The maintainers and users of such extension modules will both benefit when they are updated to support multiple interpreters. In the meantime, users may become confused by failures when using multiple interpreters, which could negatively impact extension maintainers. See Concerns below.

To mitigate that impact and accelerate compatibility, we will do the following:

- be clear that extension modules are not required to support use in multiple interpreters
- raise ImportError when an incompatible module is imported in a subinterpreter
- provide resources (e.g. docs) to help maintainers reach compatibility
- reach out to the maintainers of Cython and of the most used extension modules (on PyPI) to get feedback and possibly provide assistance
Examples

(The examples below assume the needed imports, e.g. import os, io, contextlib, threading, and textwrap as tw, along with the proposed interpreters module.)

Run isolated code in current OS thread

    interp = interpreters.create()
    print('before')
    interp.exec('print("during")')
    print('after')

Run in a different thread

    interp = interpreters.create()

    def run():
        interp.exec('print("during")')

    t = threading.Thread(target=run)
    print('before')
    t.start()
    t.join()
    print('after')

Pre-populate an interpreter

    interp = interpreters.create()
    interp.exec(tw.dedent("""
        import some_lib
        import an_expensive_module
        some_lib.set_up()
        """))
    wait_for_request()
    interp.exec(tw.dedent("""
        some_lib.handle_request()
        """))

Handling an exception

    interp = interpreters.create()
    try:
        interp.exec(tw.dedent("""
            raise KeyError
            """))
    except interpreters.RunFailedError as exc:
        print(f"got the error from the subinterpreter: {exc}")

Re-raising an exception

    interp = interpreters.create()
    try:
        try:
            interp.exec(tw.dedent("""
                raise KeyError
                """))
        except interpreters.RunFailedError as exc:
            raise exc.__cause__
    except KeyError:
        print("got a KeyError from the subinterpreter")

Note that this pattern is a candidate for later improvement.

Interact with the __main__ namespace

    interp = interpreters.create()
    interp.set_main_attrs(a=1, b=2)
    interp.exec(tw.dedent("""
        res = do_something(a, b)
        """))
    res = interp.get_main_attr('res')

Synchronize using an OS pipe

    interp = interpreters.create()
    r1, s1 = os.pipe()
    r2, s2 = os.pipe()

    def task():
        interp.exec(tw.dedent(f"""
            import os
            os.read({r1}, 1)
            print('during B')
            os.write({s2}, b'0')
            """))

    t = threading.Thread(target=task)
    t.start()
    print('before')
    os.write(s1, b'0')
    print('during A')
    os.read(r2, 1)
    print('after')
    t.join()

Sharing a file descriptor

    interp = interpreters.create()
    with open('spamspamspam') as infile:
        interp.set_main_attrs(fd=infile.fileno())
        interp.exec(tw.dedent("""
            import os
            for line in os.fdopen(fd):
                print(line)
            """))

Passing objects via pickle

    interp = interpreters.create()
    r, s = os.pipe()
    interp.exec(tw.dedent(f"""
        import os
        import pickle
        reader = {r}
        """))
    interp.exec(tw.dedent("""
        data = b''
        c = os.read(reader, 1)
        while c != b'\x00':
            while c != b'\x00':
                data += c
                c = os.read(reader, 1)
            obj = pickle.loads(data)
            do_something(obj)
            c = os.read(reader, 1)
        """))
    for obj in input:
        data = pickle.dumps(obj)
        os.write(s, data)
        os.write(s, b'\x00')
    os.write(s, b'\x00')

Capturing an interpreter's stdout

    interp = interpreters.create()
    stdout = io.StringIO()
    with contextlib.redirect_stdout(stdout):
        interp.exec(tw.dedent("""
            print('spam!')
            """))
    assert(stdout.getvalue() == 'spam!\n')

    # alternately:
    interp.exec(tw.dedent("""
        import contextlib, io
        stdout = io.StringIO()
        with contextlib.redirect_stdout(stdout):
            print('spam!')
        captured = stdout.getvalue()
        """))
    captured = interp.get_main_attr('captured')
    assert(captured == 'spam!\n')

A pipe (os.pipe()) could be used similarly.

Running a module

    interp = interpreters.create()
    main_module = mod_name
    interp.exec(f'import runpy; runpy.run_module({main_module!r})')

Running as script (including zip archives & directories)

    interp = interpreters.create()
    main_script = path_name
    interp.exec(f"import runpy; runpy.run_path({main_script!r})")

Using a channel to communicate

    tasks_recv, tasks = interpreters.create_channel()
    results, results_send = interpreters.create_channel()

    def worker():
        interp = interpreters.create()
        interp.set_main_attrs(tasks=tasks_recv, results=results_send)
        interp.exec(tw.dedent("""
            def handle_request(req):
                ...

            def capture_exception(exc):
                ...

            while True:
                try:
                    req = tasks.recv()
                except Exception:
                    # channel closed
                    break
                try:
                    res = handle_request(req)
                except Exception as exc:
                    res = capture_exception(exc)
                results.send_nowait(res)
            """))

    threads = [threading.Thread(target=worker) for _ in range(20)]
    for t in threads:
        t.start()

    requests = ...
    for req in requests:
        tasks.send(req)
    tasks.close()

    for t in threads:
        t.join()

Sharing a memoryview (imagine map-reduce)

    data, chunksize = read_large_data_set()
    buf = memoryview(data)
    numchunks = (len(buf) + chunksize - 1) // chunksize
    results = memoryview(bytearray(numchunks))

    tasks_recv, tasks = interpreters.create_channel()

    def worker():
        interp = interpreters.create()
        interp.set_main_attrs(data=buf, results=results, tasks=tasks_recv)
        interp.exec(tw.dedent("""
            while True:
                try:
                    req = tasks.recv()
                except Exception:
                    # channel closed
                    break
                resindex, start, end = req
                chunk = data[start: end]
                res = reduce_chunk(chunk)
                results[resindex] = res
            """))

    t = threading.Thread(target=worker)
    t.start()

    for i in range(numchunks):
        if not workers_running():
            raise ...
        start = i * chunksize
        end = start + chunksize
        if end > len(buf):
            end = len(buf)
        tasks.send((i, start, end))
    tasks.close()

    t.join()
    use_results(results)

Rationale

Running code in multiple interpreters provides a useful level of isolation within the same process. This can be leveraged in a number of ways. Furthermore, subinterpreters provide a well-defined framework in which such isolation may be extended. (See PEP 684.)

Alyssa (Nick) Coghlan explained some of the benefits through a comparison with multi-processing [benefits]:

    [I] expect that communicating between subinterpreters is going to end up looking an awful lot like communicating between subprocesses via shared memory. The trade-off between the two models will then be that one still just looks like a single process from the point of view of the outside world, and hence doesn't place any extra demands on the underlying OS beyond those required to run CPython with a single interpreter, while the other gives much stricter isolation (including isolating C globals in extension modules), but also demands much more from the OS when it comes to its IPC capabilities. The security risk profiles of the two approaches will also be quite different, since using subinterpreters won't require deliberately poking holes in the process isolation that operating systems give you by default.

CPython has supported multiple interpreters, with increasing levels of support, since version 1.5. While the feature has the potential to be a powerful tool, it has suffered from neglect because the multiple interpreter capabilities are not readily available directly from Python. Exposing the existing functionality in the stdlib will help reverse the situation.

This proposal is focused on enabling the fundamental capability of multiple interpreters, isolated from each other, in the same Python process. This is a new area for Python so there is relative uncertainty about the best tools to provide as companions to interpreters. Thus we minimize the functionality we add in the proposal as much as possible.

Concerns

- "subinterpreters are not worth the trouble"

Some have argued that subinterpreters do not add sufficient benefit to justify making them an official part of Python. Adding features to the language (or stdlib) has a cost in increasing the size of the language. So an addition must pay for itself.
In this case, multiple interpreter support provides a novel concurrency model focused on isolated threads of execution. Furthermore, subinterpreters provide an opportunity for changes in CPython that will allow simultaneous use of multiple CPU cores (currently prevented by the GIL--see PEP 684).

Alternatives to subinterpreters include threading, async, and multiprocessing. Threading is limited by the GIL and async isn't the right solution for every problem (nor for every person). Multiprocessing is likewise valuable in some but not all situations. Direct IPC (rather than via the multiprocessing module) provides similar benefits but with the same caveat.

Notably, subinterpreters are not intended as a replacement for any of the above. Certainly they overlap in some areas, but the benefits of subinterpreters include isolation and (potentially) performance. In particular, subinterpreters provide a direct route to an alternate concurrency model (e.g. CSP) which has found success elsewhere and will appeal to some Python users. That is the core value that the interpreters module will provide.

- "stdlib support for multiple interpreters adds extra burden on C extension authors"

In the Interpreter Isolation section below we identify ways in which isolation in CPython's subinterpreters is incomplete. Most notable is extension modules that use C globals to store internal state. (PEP 3121 and PEP 489 provide a solution to that problem, followed by some extra APIs that improve efficiency, e.g. PEP 573).

Consequently, projects that publish extension modules may face an increased maintenance burden as their users start using subinterpreters, where their modules may break. This situation is limited to modules that use C globals (or use libraries that use C globals) to store internal state. For numpy, the reported-bug rate is one every 6 months. [bug-rate]

Ultimately this comes down to a question of how often it will be a problem in practice: how many projects would be affected, how often their users will be affected, what the additional maintenance burden will be for projects, and what the overall benefit of subinterpreters is to offset those costs. The position of this PEP is that the actual extra maintenance burden will be small and well below the threshold at which subinterpreters are worth it.

- "creating a new concurrency API deserves much more thought and experimentation, so the new module shouldn't go into the stdlib right away, if ever"

Introducing an API for a new concurrency model, like happened with asyncio, is an extremely large project that requires a lot of careful consideration. It is not something that can be done as simply as this PEP proposes and likely deserves significant time on PyPI to mature. (See Nathaniel's post on python-dev.)

However, this PEP does not propose any new concurrency API. At most it exposes minimal tools (e.g. subinterpreters, channels) which may be used to write code that follows patterns associated with (relatively) new-to-Python concurrency models. Those tools could also be used as the basis for APIs for such concurrency models. Again, this PEP does not propose any such API.

- "there is no point to exposing subinterpreters if they still share the GIL"
- "the effort to make the GIL per-interpreter is disruptive and risky"

A common misconception is that this PEP also includes a promise that interpreters will no longer share the GIL. When that is clarified, the next question is "what is the point?". This is already answered at length in this PEP.
Just to be clear, the value lies in:

* increase exposure of the existing feature, which helps improve the code health of the entire CPython runtime
* expose the (mostly) isolated execution of interpreters
* preparation for per-interpreter GIL
* encourage experimentation

- "data sharing can have a negative impact on cache performance in multi-core scenarios"

(See [cache-line-ping-pong].) This shouldn't be a problem for now as we have no immediate plans to actually share data between interpreters, instead focusing on copying.

About Subinterpreters

Concurrency

Concurrency is a challenging area of software development. Decades of research and practice have led to a wide variety of concurrency models, each with different goals. Most center on correctness and usability. One class of concurrency models focuses on isolated threads of execution that interoperate through some message passing scheme. A notable example is Communicating Sequential Processes [CSP] (upon which Go's concurrency is roughly based). The intended isolation inherent to CPython's interpreters makes them well-suited to this approach.

Shared Data

CPython's interpreters are inherently isolated (with caveats explained below), in contrast to threads. So the same communicate-via-shared-memory approach doesn't work. Without an alternative, effective use of concurrency via multiple interpreters is significantly limited.

The key challenge here is that sharing objects between interpreters faces complexity due to various constraints on object ownership, visibility, and mutability. At a conceptual level it's easier to reason about concurrency when objects only exist in one interpreter at a time. At a technical level, CPython's current memory model limits how Python objects may be shared safely between interpreters; effectively, objects are bound to the interpreter in which they were created. Furthermore, the complexity of object sharing increases as interpreters become more isolated, e.g. after GIL removal (though this is mitigated somewhat for some "immortal" objects; see PEP 683). Consequently, the mechanism for sharing needs to be carefully considered.

There are a number of valid solutions, several of which may be appropriate to support in Python's stdlib and C-API. Any such solution is likely to share many characteristics with the others. In the meantime, we propose here a minimal solution (Interpreter.set_main_attrs()), which sets some precedent for how objects are shared. More importantly, it facilitates the introduction of more advanced approaches later and allows them to coexist and cooperate. In part to demonstrate that, we will provide a basic implementation of "channels", as a somewhat more advanced sharing solution. Separate proposals may cover:

- the addition of a public C-API based on the implementation of Interpreter.set_main_attrs()
- the addition of other sharing approaches to the "interpreters" module

The fundamental enabling feature for communication is that most objects can be converted to some encoding of underlying raw data, which is safe to be passed between interpreters. For example, an int object can be turned into a C long value, sent to another interpreter, and turned back into an int object there. As another example, None may be passed as-is.

Regardless, the effort to determine the best way forward here is mostly outside the scope of this PEP. In the meantime, this proposal describes a basic interim solution using pipes (os.pipe()), as well as providing a dedicated capability ("channels").
See API For Communication below.

Interpreter Isolation

CPython's interpreters are intended to be strictly isolated from each other. Each interpreter has its own copy of all modules, classes, functions, and variables. The same applies to state in C, including in extension modules. The CPython C-API docs explain more. [caveats]

However, there are ways in which interpreters do share some state. First of all, some process-global state remains shared:

- file descriptors
- low-level env vars
- process memory (though allocators are isolated)
- builtin types (e.g. dict, bytes)
- singletons (e.g. None)
- underlying static module data (e.g. functions) for builtin/extension/frozen modules

There are no plans to change this.

Second, some isolation is faulty due to bugs or implementations that did not take subinterpreters into account. This includes things like extension modules that rely on C globals. [cryptography] In these cases bugs should be opened (some are already):

- readline module hook functions (http://bugs.python.org/issue4202)
- memory leaks on re-init (http://bugs.python.org/issue21387)

Finally, some potential isolation is missing due to the current design of CPython. Improvements are currently going on to address gaps in this area:

- extensions using the PyGILState_* API are somewhat incompatible [gilstate]

Existing Usage

Multiple interpreter support has not been a widely used feature. In fact, there have been only a handful of documented cases of widespread usage, including mod_wsgi, OpenStack Ceph, and JEP. On the one hand, these cases provide confidence that existing multiple interpreter support is relatively stable. On the other hand, there isn't much of a sample size from which to judge the utility of the feature.

Alternate Python Implementations

I've solicited feedback from various Python implementors about support for subinterpreters. Each has indicated that they would be able to support multiple interpreters in the same process (if they choose to) without a lot of trouble. Here are the projects I contacted:

- jython ([jython])
- ironpython (personal correspondence)
- pypy (personal correspondence)
- micropython (personal correspondence)

"interpreters" Module API

The module provides the following functions:

    list_all() -> [Interpreter]
        Return a list of all existing interpreters.

    get_current() -> Interpreter
        Return the currently running interpreter.

    get_main() -> Interpreter
        Return the main interpreter. If the Python implementation has no concept of a main interpreter then return None.

    create() -> Interpreter
        Initialize a new Python interpreter and return it. It will remain idle until something is run in it and always run in its own thread.

    is_shareable(obj) -> bool
        Return True if the object may be "shared" between interpreters. This does not necessarily mean that the actual objects will be shared. Instead, it means that the objects' underlying data will be shared in a cross-interpreter way, whether via a proxy, a copy, or some other means.

The module also provides the following class:

    class Interpreter(id)

    id -> int
        The interpreter's ID. (read-only)

    is_running() -> bool
        Return whether or not the interpreter's "exec()" is currently executing code. Code running in subthreads is ignored. Calling this on the current interpreter will always return True.

    close()
        Finalize and destroy the interpreter. This may not be called on an already running interpreter. Doing so results in a RuntimeError.
    set_main_attrs(iterable_or_mapping, /)
    set_main_attrs(**kwargs)
        Set attributes in the interpreter's __main__ module corresponding to the given name-value pairs. Each value must be a "shareable" object and will be converted to a new object (e.g. copy, proxy) in whatever way that object's type defines. If an attribute with the same name is already set, it will be overwritten. This method is helpful for setting up an interpreter before calling exec().

    get_main_attr(name, default=None, /)
        Return the value of the corresponding attribute of the interpreter's __main__ module. If the attribute isn't set then the default is returned. If it is set, but the value isn't "shareable" then a ValueError is raised. This may be used to introspect the __main__ module, as well as a very basic mechanism for "returning" one or more results from Interpreter.exec().

    exec(source_str, /)
        Run the provided Python source code in the interpreter, in its __main__ module. This may not be called on an already running interpreter. Doing so results in a RuntimeError.

An "interp.exec()" call is similar to a builtin exec() call (or to calling a function that returns None). Once "interp.exec()" completes, the code that called "exec()" continues executing (in the original interpreter). Likewise, if there is any uncaught exception then it effectively (see below) propagates into the code where "interp.exec()" was called.

Like exec() (and threads), but unlike function calls, there is no return value. If any "return" value from the code is needed, send the data out via a pipe (os.pipe()) or channel or other cross-interpreter communication mechanism.

The big difference from exec() or functions is that "interp.exec()" executes the code in an entirely different interpreter, with entirely separate state. The interpreters are completely isolated from each other, so the state of the original interpreter (including the code it was executing in the current OS thread) does not affect the state of the target interpreter (the one that will execute the code). Likewise, the target does not affect the original, nor any of its other threads.

Instead, the state of the original interpreter (for this thread) is frozen, and the code it's executing completely blocks. At that point, the target interpreter is given control of the OS thread. Then, when it finishes executing, the original interpreter gets control back and continues executing.

So calling "interp.exec()" will effectively cause the current Python thread to completely pause. Sometimes you won't want that pause, in which case you should make the "exec()" call in another thread. To do so, add a function that calls "interp.exec()" and then run that function in a normal "threading.Thread" (see the sketch below).

Note that the interpreter's state is never reset, neither before "interp.exec()" executes the code nor after. Thus the interpreter state is preserved between calls to "interp.exec()". This includes "sys.modules", the "builtins" module, and the internal state of C extension modules.

Also note that "interp.exec()" executes in the namespace of the "__main__" module, just like scripts, the REPL, "-m", and "-c". Just as the interpreter's state is not ever reset, the "__main__" module is never reset. You can imagine concatenating the code from each "interp.exec()" call into one long script. This is the same as how the REPL operates.

Supported code: source text.

In addition to the functionality of Interpreter.set_main_attrs(), the module provides a related way to pass data between interpreters: channels. See Channels below.
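A minimal sketch of the thread-wrapping pattern mentioned above (an editorial illustration, assuming the proposed API):

    import threading
    import interpreters  # the proposed module

    interp = interpreters.create()

    def run_in_background(source):
        # Wrap interp.exec() in a thread so the current Python thread
        # keeps running while the subinterpreter executes.
        t = threading.Thread(target=interp.exec, args=(source,))
        t.start()
        return t

    t = run_in_background('print("hello from a subinterpreter")')
    # ... the current interpreter continues doing work here ...
    t.join()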
Uncaught Exceptions

Regarding uncaught exceptions in Interpreter.exec(), we noted that they are "effectively" propagated into the code where interp.exec() was called. To prevent leaking exceptions (and tracebacks) between interpreters, we create a surrogate of the exception and its traceback (see traceback.TracebackException), set it to __cause__ on a new interpreters.RunFailedError, and raise that. Directly raising (a proxy of) the exception is problematic since it's harder to distinguish between an error in the interp.exec() call and an uncaught exception from the subinterpreter.

Interpreter Restrictions

Every new interpreter created by interpreters.create() now has specific restrictions on any code it runs. This includes the following:

- importing an extension module fails if it does not implement multi-phase init
- daemon threads may not be created
- os.fork() is not allowed (so no multiprocessing)
- os.exec*() is not allowed (but "fork+exec", a la subprocess, is okay)

Note that interpreters created with the existing C-API do not have these restrictions. The same is true for the "main" interpreter, so existing use of Python will not change.

We may choose to later loosen some of the above restrictions or provide a way to enable/disable granular restrictions individually. Regardless, requiring multi-phase init from extension modules will always be a default restriction.

API For Communication

As discussed in Shared Data above, multiple interpreter support is less useful without a mechanism for sharing data (communicating) between them. Sharing actual Python objects between interpreters, however, has enough potential problems that we are avoiding support for that in this proposal. Nor, as mentioned earlier, are we adding anything more than a basic mechanism for communication.

That mechanism is the Interpreter.set_main_attrs() method. It may be used to set up global variables before Interpreter.exec() is called. The name-value pairs passed to set_main_attrs() are bound as attributes of the interpreter's __main__ module. The values must be "shareable". See Shareable Types below.

Additional approaches to communicating and sharing objects are enabled through Interpreter.set_main_attrs(). A shareable object could be implemented which works like a queue, but with cross-interpreter safety. In fact, this PEP does include an example of such an approach: channels.

Shareable Types

An object is "shareable" if its type supports shareable instances. The type must implement a new internal protocol, which is used to convert an object to interpreter-independent data and then to convert it back to an object on the other side. Also see is_shareable() above.

A minimal set of simple, immutable builtin types will be supported initially, including:

- None
- bool
- bytes
- str
- int
- float

We will also support a small number of complex types initially:

- memoryview, to allow sharing PEP 3118 buffers
- channels

Further builtin types may be supported later, complex or not. Limiting the initial shareable types is a practical matter, reducing the potential complexity of the initial implementation. There are a number of strategies we may pursue in the future to expand supported objects, once we have more experience with interpreter isolation.

In the meantime, a separate proposal will discuss making the internal protocol (and C-API) used by Interpreter.set_main_attrs() public. With that protocol, support for other types could be added by extension modules.
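To make the shareable-type rules concrete, here is a hedged editorial sketch (assuming the is_shareable() and set_main_attrs() behavior described above):

    import interpreters  # the proposed module

    interp = interpreters.create()
    candidates = {'name': 'spam', 'count': 42, 'ratio': 0.5, 'blob': b'\x00\x01'}

    # Only bind values whose underlying data can cross the boundary;
    # str, int, float, and bytes are all in the initial supported set.
    shareable = {k: v for k, v in candidates.items()
                 if interpreters.is_shareable(v)}
    interp.set_main_attrs(**shareable)
    interp.exec('print(name, count, ratio, len(blob))')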
Communicating Through OS Pipes

Even without a dedicated object for communication, users may already use existing tools. For example, one basic approach for sending data between interpreters is to use a pipe (see os.pipe()):

1. interpreter A calls os.pipe() to get a read/write pair of file descriptors (both int objects)
2. interpreter A calls interp.set_main_attrs(), binding the read FD (or embeds it using string formatting)
3. interpreter A calls interp.exec() on interpreter B
4. interpreter A writes some bytes to the write FD
5. interpreter B reads those bytes

Several of the earlier examples demonstrate this, such as Synchronize using an OS pipe.

Channels

The interpreters module will include a dedicated solution for passing object data between interpreters: channels. They are included in the module in part to provide an easier mechanism than using os.pipe() and in part to demonstrate how libraries may take advantage of Interpreter.set_main_attrs() and the protocol it uses.

A channel is a simplex FIFO. It is a basic, opt-in data sharing mechanism that draws inspiration from pipes, queues, and CSP's channels. [fifo] The main difference from pipes is that channels can be associated with zero or more interpreters on either end. Like queues, which are also many-to-many, channels are buffered (though they also offer methods with unbuffered semantics).

Channels have two operations: send and receive. A key characteristic of those operations is that channels transmit data derived from Python objects rather than the objects themselves. When objects are sent, their data is extracted. When the "object" is received in the other interpreter, the data is converted back into an object owned by that interpreter.

To make this work, the mutable shared state will be managed by the Python runtime, not by any of the interpreters. Initially we will support only one type of object for shared state: the channels provided by interpreters.create_channel(). Channels, in turn, will carefully manage passing objects between interpreters. This approach, including keeping the API minimal, helps us avoid further exposing any underlying complexity to Python users.

The interpreters module provides the following function related to channels:

    create_channel() -> (RecvChannel, SendChannel)
        Create a new channel and return (recv, send), the RecvChannel and SendChannel corresponding to the ends of the channel. Both ends of the channel are supported "shared" objects (i.e. may be safely shared by different interpreters). Thus they may be set using "Interpreter.set_main_attrs()".

The module also provides the following channel-related classes:

    class RecvChannel(id)
        The receiving end of a channel. An interpreter may use this to receive objects from another interpreter. Any type supported by Interpreter.set_main_attrs() will be supported here, though at first only a few of the simple, immutable builtin types will be supported.

    id -> int
        The channel's unique ID. The "send" end has the same one.

    recv(*, timeout=None)
        Return the next object from the channel. If none have been sent then wait until the next send (or until the timeout is hit). At the least, the object will be equivalent to the sent object. That will almost always mean the same type with the same data, though it could also be a compatible proxy. Regardless, it may use a copy of that data or actually share the data. That's up to the object's type.

    recv_nowait(default=None)
        Return the next object from the channel. If none have been sent then return the default. Otherwise, this is the same as the "recv()" method.
    class SendChannel(id)
        The sending end of a channel. An interpreter may use this to send objects to another interpreter. Any type supported by Interpreter.set_main_attrs() will be supported here, though at first only a few of the simple, immutable builtin types will be supported.

    id -> int
        The channel's unique ID. The "recv" end has the same one.

    send(obj, *, timeout=None)
        Send the object (i.e. its data) to the "recv" end of the channel. Wait until the object is received. If the object is not shareable then ValueError is raised. The builtin memoryview is supported, so sending a buffer across involves first wrapping the object in a memoryview and then sending that.

    send_nowait(obj)
        Send the object to the "recv" end of the channel. This behaves the same as "send()", except for the waiting part. If no interpreter is currently receiving (waiting on the other end) then queue the object and return False. Otherwise return True.

Caveats For Shared Objects

Again, Python objects are not shared between interpreters. However, in some cases data those objects wrap is actually shared and not just copied. One example might be PEP 3118 buffers. In those cases the object in the original interpreter is kept alive until the shared data in the other interpreter is no longer used. Then object destruction can happen like normal in the original interpreter, along with the previously shared data.

Documentation

The new stdlib docs page for the interpreters module will include the following:

- (at the top) a clear note that support for multiple interpreters is not required from extension modules
- some explanation about what subinterpreters are
- brief examples of how to use multiple interpreters (and communicating between them)
- a summary of the limitations of using multiple interpreters
- (for extension maintainers) a link to the resources for ensuring multiple interpreters compatibility
- much of the API information in this PEP

Docs about resources for extension maintainers already exist on the Isolating Extension Modules howto page. Any extra help will be added there. For example, it may prove helpful to discuss strategies for dealing with linked libraries that keep their own subinterpreter-incompatible global state.

Note that the documentation will play a large part in mitigating any negative impact that the new interpreters module might have on extension module maintainers.

Also, the ImportError for incompatible extension modules will be updated to clearly say it is due to missing multiple interpreters compatibility and that extensions are not required to provide it. This will help set user expectations properly.

Alternative Solutions

One possible alternative to a new module is to add support for interpreters to concurrent.futures. There are several reasons why that wouldn't work:

- the obvious place to look for multiple interpreters support is an "interpreters" module, much as with "threading", etc.
- concurrent.futures is all about executing functions but currently we don't have a good way to run a function from one interpreter in another

Similar reasoning applies for support in the multiprocessing module.

Open Questions

- will it be too confusing that interp.exec() runs in the current thread?
- should we add pickling fallbacks right now for interp.exec(), and/or Interpreter.set_main_attrs() and Interpreter.get_main_attr()?
- should we support (limited) functions in interp.exec() right now?
- rename Interpreter.close() to Interpreter.destroy()?
- drop Interpreter.get_main_attr(), since we have channels?
- should channels be their own PEP?

Deferred Functionality

In the interest of keeping this proposal minimal, the following functionality has been left out for future consideration. Note that this is not a judgement against any of said capability, but rather a deferment. That said, each is arguably valid.

Add convenience API

There are a number of things I can imagine would smooth out hypothetical rough edges with the new module:

- add something like Interpreter.run() or Interpreter.call() that calls interp.exec() and falls back to pickle
- fall back to pickle in Interpreter.set_main_attrs() and Interpreter.get_main_attr()

These would be easy to do if this proves to be a pain point.

Avoid possible confusion about interpreters running in the current thread

One regular point of confusion has been that Interpreter.exec() executes in the current OS thread, temporarily blocking the current Python thread. It may be worth doing something to avoid that confusion. Some possible solutions for this hypothetical problem:

- by default, run in a new thread?
- add Interpreter.exec_in_thread()?
- add Interpreter.exec_in_current_thread()?

In earlier versions of this PEP the method was interp.run(). The simple change to interp.exec() alone will probably reduce confusion sufficiently, when coupled with educating users via the docs. If it turns out to be a real problem, we can pursue one of the alternatives at that point.

Clarify "running" vs. "has threads"

Interpreter.is_running() refers specifically to whether or not Interpreter.exec() (or similar) is running somewhere. It does not say anything about if the interpreter has any subthreads running. That information might be helpful. Some things we could do:

- rename Interpreter.is_running() to Interpreter.is_running_main()
- add Interpreter.has_threads(), to complement Interpreter.is_running()
- expand to Interpreter.is_running(main=True, threads=False)

None of these are urgent and any could be done later, if desired.

A Dunder Method For Sharing

We could add a special method, like __xid__, to correspond to tp_xid. At the very least, it would allow Python types to convert their instances to some other type that implements tp_xid. The problem is that exposing this capability to Python code presents a degree of complexity that hasn't been explored yet, nor is there a compelling case to investigate that complexity.

Interpreter.call()

It would be convenient to run existing functions in subinterpreters directly. Interpreter.exec() could be adjusted to support this or a call() method could be added:

    Interpreter.call(f, *args, **kwargs)

This suffers from the same problem as sharing objects between interpreters via queues. The minimal solution (running a source string) is sufficient for us to get the feature out where it can be explored.

Interpreter.run_in_thread()

This method would make an interp.exec() call for you in a thread. Doing this using only threading.Thread and interp.exec() is relatively trivial so we've left it out.

Synchronization Primitives

The threading module provides a number of synchronization primitives for coordinating concurrent operations. This is especially necessary due to the shared-state nature of threading. In contrast, interpreters do not share state. Data sharing is restricted to the runtime's shareable objects capability, which does away with the need for explicit synchronization.
If any sort of opt-in shared state support is added to CPython's interpreters in the future, that same effort can introduce synchronization primitives to meet that need.

CSP Library

A csp module would not be a large step away from the functionality provided by this PEP. However, adding such a module is outside the minimalist goals of this proposal.

Syntactic Support

The Go language provides a concurrency model based on CSP, so it's similar to the concurrency model that multiple interpreters support. However, Go also provides syntactic support, as well as several builtin concurrency primitives, to make concurrency a first-class feature. Conceivably, similar syntactic (and builtin) support could be added to Python using interpreters. However, that is way outside the scope of this PEP!

Multiprocessing

The multiprocessing module could support interpreters in the same way it supports threads and processes. In fact, the module's maintainer, Davin Potts, has indicated this is a reasonable feature request. However, it is outside the narrow scope of this PEP.

C-extension opt-in/opt-out

By using the PyModuleDef_Slot introduced by PEP 489, we could easily add a mechanism by which C-extension modules could opt out of multiple interpreter support. Then the import machinery, when operating in a subinterpreter, would need to check the module for support. It would raise an ImportError if unsupported.

Alternately we could support opting in to multiple interpreters support. However, that would probably exclude many more modules (unnecessarily) than the opt-out approach. Also, note that PEP 489 defined that an extension's use of the PEP's machinery implies multiple interpreters support.

The scope of adding the ModuleDef slot and fixing up the import machinery is non-trivial, but could be worth it. It all depends on how many extension modules break under subinterpreters. Given that there are relatively few cases we know of through mod_wsgi, we can leave this for later.

Poisoning channels

CSP has the concept of poisoning a channel. Once a channel has been poisoned, any send() or recv() call on it would raise a special exception, effectively ending execution in the interpreter that tried to use the poisoned channel. This could be accomplished by adding a poison() method to both ends of the channel. The close() method can be used in this way (mostly), but these semantics are relatively specialized and can wait.

Resetting __main__

As proposed, every call to Interpreter.exec() will execute in the namespace of the interpreter's existing __main__ module. This means that data persists there between interp.exec() calls. Sometimes this isn't desirable and you want to execute in a fresh __main__. Also, you don't necessarily want to leak objects there that you aren't using any more.

Note that the following won't work right because it will clear too much (e.g. __name__ and the other "__dunder__" attributes):

    interp.exec('globals().clear()')

Possible solutions include:

- a create() arg to indicate resetting __main__ after each interp.exec() call
- an Interpreter.reset_main flag to support opting in or out after the fact
- an Interpreter.reset_main() method to opt in when desired
- importlib.util.reset_globals() [reset_globals]

Also note that resetting __main__ does nothing about state stored in other modules. So any solution would have to be clear about the scope of what is being reset.
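For illustration, here is a hedged editorial sketch (not part of the proposal) of a reset that avoids the over-clearing problem noted above:

    # Hypothetical: clear __main__ while preserving __name__ and the
    # other "__dunder__" attributes (assumes tw = textwrap, as earlier).
    interp.exec(tw.dedent("""
        g = globals()
        for name in list(g):
            if not (name.startswith('__') and name.endswith('__')):
                del g[name]
        del g, name
        """))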
Conceivably we could invent a mechanism by which any (or every) module could be reset, unlike reload() which does not clear the module before loading into it. Regardless, since __main__ is the execution namespace of the interpreter, resetting it has a much more direct correlation to interpreters and their dynamic state than does resetting other modules. So a more generic module reset mechanism may prove unnecessary.

This isn't a critical feature initially. It can wait until later if desirable.

Resetting an interpreter's state

It may be nice to re-use an existing subinterpreter instead of spinning up a new one. Since an interpreter has substantially more state than just the __main__ module, it isn't so easy to put an interpreter back into a pristine/fresh state. In fact, there may be parts of the state that cannot be reset from Python code.

A possible solution is to add an Interpreter.reset() method. This would put the interpreter back into the state it was in when newly created. If called on a running interpreter it would fail (hence the main interpreter could never be reset). This would likely be more efficient than creating a new interpreter, though that depends on what optimizations will be made later to interpreter creation.

While this would potentially provide functionality that is not otherwise available from Python code, it isn't a fundamental functionality. So in the spirit of minimalism here, this can wait. Regardless, I doubt it would be controversial to add it post-PEP.

Copy an existing interpreter's state

Relatedly, it may be useful to support creating a new interpreter based on an existing one, e.g. Interpreter.copy(). This ties into the idea that a snapshot could be made of an interpreter's memory, which would make starting up CPython, or creating new interpreters, faster in general. The same mechanism could be used for a hypothetical Interpreter.reset(), as described previously.

Shareable file descriptors and sockets

Given that file descriptors and sockets are process-global resources, making them shareable is a reasonable idea. They would be a good candidate for the first effort at expanding the supported shareable types. They aren't strictly necessary for the initial API.

Integration with async

Per Antoine Pitrou [async]:

    Has any thought been given to how FIFOs could integrate with async code driven by an event loop (e.g. asyncio)? I think the model of executing several asyncio (or Tornado) applications each in their own subinterpreter may prove quite interesting to reconcile multi-core concurrency with ease of programming. That would require the FIFOs to be able to synchronize on something an event loop can wait on (probably a file descriptor?).

The basic functionality of multiple interpreters support does not depend on async and can be added later.

A possible solution is to provide async implementations of the blocking channel methods (recv() and send()). Alternately, "readiness callbacks" could be used to simplify use in async scenarios. This would mean adding an optional callback (kw-only) parameter to the recv_nowait() and send_nowait() channel methods. The callback would be called once the object was sent or received (respectively). (Note that making channels buffered makes readiness callbacks less important.)

Support for iteration

Supporting iteration on RecvChannel (via __iter__() or __next__()) may be useful. A trivial implementation would use the recv() method, similar to how files do iteration.
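A hedged sketch of such a wrapper (an editorial illustration assuming the RecvChannel.recv() behavior described above, not part of the proposal):

    class IterableRecvChannel:
        """Wrap a RecvChannel to support the iterator protocol."""

        def __init__(self, channel):
            self._channel = channel

        def __iter__(self):
            return self

        def __next__(self):
            try:
                # Block until the next object arrives, like recv().
                return self._channel.recv()
            except Exception:
                # Treat a closed channel as the end of iteration.
                raise StopIteration

    # for obj in IterableRecvChannel(recv_end): ...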
Since this isn't a fundamental capability and has a simple analog, adding iteration support can wait until later.

Channel context managers

Context manager support on RecvChannel and SendChannel may be helpful. The implementation would be simple, wrapping a call to close() (or maybe release()) like files do. As with iteration, this can wait.

Pipes and Queues

With the proposed object passing mechanism of "os.pipe()", other similar basic types aren't strictly required to achieve the minimal useful functionality of multiple interpreters. Such types include pipes (like unbuffered channels, but one-to-one) and queues (like channels, but more generic). See below in Rejected Ideas for more information.

Even though these types aren't part of this proposal, they may still be useful in the context of concurrency. Adding them later is entirely reasonable. They could be trivially implemented as wrappers around channels. Alternatively they could be implemented for efficiency at the same low level as channels.

Return a lock from send()

When sending an object through a channel, you don't have a way of knowing when the object gets received on the other end. One way to work around this is to return a locked threading.Lock from SendChannel.send() that unlocks once the object is received. Alternately, the proposed SendChannel.send() (blocking) and SendChannel.send_nowait() provide an explicit distinction that is less likely to confuse users. Note that returning a lock would matter for buffered channels (i.e. queues). For unbuffered channels it is a non-issue.

Support prioritization in channels

A simple example is queue.PriorityQueue in the stdlib.

Support inheriting settings (and more?)

Folks might find it useful, when creating a new interpreter, to be able to indicate that they would like some things "inherited" by the new interpreter. The mechanism could be a strict copy or it could be copy-on-write. The motivating example is with the warnings module (e.g. copy the filters). The feature isn't critical, nor would it be widely useful, so it can wait until there's interest. Notably, both suggested solutions will require significant work, especially when it comes to complex objects and most especially for mutable containers of mutable complex objects.

Make exceptions shareable

Exceptions are propagated out of interp.exec() calls, so it isn't a big leap to make them shareable. However, as noted elsewhere, it isn't essential (or particularly common) so we can wait on doing that.

Make everything shareable through serialization

We could use pickle (or marshal) to serialize everything and thus make them shareable. Doing this is potentially inefficient, but it may be a matter of convenience in the end. We can add it later, but trying to remove it later would be significantly more painful.

Make RunFailedError.__cause__ lazy

An uncaught exception in a subinterpreter (from interp.exec()) is copied to the calling interpreter and set as __cause__ on a RunFailedError which is then raised. That copying part involves some sort of deserialization in the calling interpreter, which can be expensive (e.g. due to imports) yet is not always necessary.

So it may be useful to use an ExceptionProxy type to wrap the serialized exception and only deserialize it when needed. That could be via ExceptionProxy.__getattribute__() or perhaps through RunFailedError.resolve() (which would raise the deserialized exception and set RunFailedError.__cause__ to the exception).
It may also make sense to have RunFailedError.__cause__ be a descriptor that does the lazy deserialization (and sets __cause__) on the RunFailedError instance.

Return a value from interp.exec()

Currently interp.exec() always returns None. One idea is to return the return value from whatever the subinterpreter ran. However, for now it doesn't make sense. The only thing folks can run is a string of code (i.e. a script). This is equivalent to PyRun_StringFlags(), exec(), or a module body. None of those "return" anything. We can revisit this once interp.exec() supports functions, etc.

Add a shareable synchronization primitive

This would be _threading.Lock (or something like it) where interpreters would actually share the underlying mutex. The main concern is that locks and isolated interpreters may not mix well (as learned in Go). We can add this later if it proves desirable without much trouble.

Propagate SystemExit and KeyboardInterrupt Differently

The exception types that inherit from BaseException (aside from Exception) are usually treated specially. These types are: KeyboardInterrupt, SystemExit, and GeneratorExit. It may make sense to treat them specially when it comes to propagation from interp.exec(). Here are some options:

* propagate like normal via RunFailedError
* do not propagate (handle them somehow in the subinterpreter)
* propagate them directly (avoid RunFailedError)
* propagate them directly (set RunFailedError as __cause__)

We aren't going to worry about handling them differently. Threads already ignore SystemExit, so for now we will follow that pattern.

Add an explicit release() and close() to channel end classes

It can be convenient to have an explicit way to close a channel against further global use. Likewise it could be useful to have an explicit way to release one of the channel ends relative to the current interpreter. Among other reasons, such a mechanism is useful for communicating overall state between interpreters without the extra boilerplate that passing objects through a channel directly would require.

The challenge is getting automatic release/close right without making it hard to understand. This is especially true when dealing with a non-empty channel. We should be able to get by without release/close for now.

Add SendChannel.send_buffer()

This method would allow no-copy sending of an object through a channel if it supports the PEP 3118 buffer protocol (e.g. memoryview). Support for this is not fundamental to channels and can be added on later without much disruption.

Auto-run in a thread

The PEP proposes a hard separation between subinterpreters and threads: if you want to run in a thread you must create the thread yourself and call interp.exec() in it. However, it might be convenient if interp.exec() could do that for you, meaning there would be less boilerplate.

Furthermore, we anticipate that users will want to run in a thread much more often than not. So it would make sense to make this the default behavior. We would add a kw-only param "threaded" (default True) to interp.exec() to allow the run-in-the-current-thread operation.

Rejected Ideas

Explicit channel association

Interpreters are implicitly associated with channels upon recv() and send() calls. They are de-associated with release() calls. The alternative would be explicit methods. It would be either add_channel() and remove_channel() methods on Interpreter objects or something similar on channel objects.

In practice, this level of management shouldn't be necessary for users.
Rejected Ideas

Explicit channel association

Interpreters are implicitly associated with channels upon recv() and send() calls. They are de-associated with release() calls. The alternative would be explicit methods. It would be either add_channel() and remove_channel() methods on Interpreter objects or something similar on channel objects. In practice, this level of management shouldn't be necessary for users. So adding more explicit support would only add clutter to the API.

Add an API based on pipes

A pipe would be a simplex FIFO between exactly two interpreters. For most use cases this would be sufficient. It could potentially simplify the implementation as well. However, it isn't a big step to supporting a many-to-many simplex FIFO via channels. Also, with pipes the API ends up being slightly more complicated, requiring naming the pipes.

Add an API based on queues

Queues and buffered channels are almost the same thing. The main difference is that channels have a stronger relationship with context (i.e. the associated interpreter). The name "Channel" was used instead of "Queue" to avoid confusion with the stdlib queue.Queue.

"enumerate"

The list_all() function provides the list of all interpreters. In the threading module, which partly inspired the proposed API, the function is called enumerate(). The name is different here to avoid confusing Python users who are not already familiar with the threading API. For them "enumerate" is rather unclear, whereas "list_all" is clear.

Alternate solutions to prevent leaking exceptions across interpreters

In function calls, uncaught exceptions propagate to the calling frame. The same approach could be taken with interp.exec(). However, this would mean that exception objects would leak across the inter-interpreter boundary. Likewise, the frames in the traceback would potentially leak.

While that might not be a problem currently, it would be a problem once interpreters get better isolation relative to memory management (which is necessary to stop sharing the GIL between interpreters). We've resolved the semantics of how the exceptions propagate by raising a RunFailedError instead, for which __cause__ wraps a safe proxy for the original exception and traceback.

Rejected possible solutions:

- reproduce the exception and traceback in the original interpreter and raise that
- raise a subclass of RunFailedError that proxies the original exception and traceback
- raise RuntimeError instead of RunFailedError
- convert at the boundary (a la subprocess.CalledProcessError) (requires a cross-interpreter representation)
- support customization via Interpreter.excepthook (requires a cross-interpreter representation)
- wrap in a proxy at the boundary (including with support for something like err.raise() to propagate the traceback)
- return the exception (or its proxy) from interp.exec() instead of raising it
- return a result object (like subprocess does) [result-object] (unnecessary complexity?)
- throw the exception away and expect users to deal with unhandled exceptions explicitly in the script they pass to interp.exec() (they can pass error info out via channels); with threads you have to do something similar
Always associate each new interpreter with its own thread

As implemented in the C-API, an interpreter is not inherently tied to any thread. Furthermore, it will run in any existing thread, whether created by Python or not. You only have to activate one of its thread states (PyThreadState) in the thread first. This means that the same thread may run more than one interpreter (though obviously not at the same time). The proposed module maintains this behavior. Interpreters are not tied to threads. Only calls to Interpreter.exec() are. However, one of the key objectives of this PEP is to provide a more human-centric concurrency model. With that in mind, from a conceptual standpoint the module might be easier to understand if each interpreter were associated with its own thread.

That would mean interpreters.create() would create a new thread and Interpreter.exec() would only execute in that thread (and nothing else would). The benefit is that users would not have to wrap Interpreter.exec() calls in a new threading.Thread. Nor would they be in a position to accidentally pause the current interpreter (in the current thread) while their interpreter executes.

The idea is rejected because the benefit is small and the cost is high. The difference from the capability in the C-API would be potentially confusing. The implicit creation of threads is magical. The early creation of threads is potentially wasteful. The inability to run arbitrary interpreters in an existing thread would prevent some valid use cases, frustrating users. Tying interpreters to threads would require extra runtime modifications. It would also make the module's implementation overly complicated. Finally, it might not even make the module easier to understand.

Only associate interpreters upon use

Associate interpreters with channel ends only once recv(), send(), etc. are called. Doing this is potentially confusing and also can lead to unexpected races where a channel is auto-closed before it can be used in the original (creating) interpreter.

Allow multiple simultaneous calls to Interpreter.exec()

This would make sense especially if Interpreter.exec() were to manage new threads for you (which we've rejected). Essentially, each call would run independently, which would be mostly fine from a narrow technical standpoint, since each interpreter can have multiple threads. The problem is that the interpreter has only one __main__ module and simultaneous Interpreter.exec() calls would have to sort out sharing __main__ or we'd have to invent a new mechanism. Neither would be simple enough to be worth doing.

Add a "reraise" method to RunFailedError

While having __cause__ set on RunFailedError helps produce a more useful traceback, it's less helpful when handling the original error. To help facilitate this, we could add RunFailedError.reraise(). This method would enable the following pattern:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            exc.reraise()
    except MyException:
        ...

This would be made even simpler if there existed a __reraise__ protocol.

All that said, this is completely unnecessary. Using __cause__ is good enough:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            raise exc.__cause__
    except MyException:
        ...

Note that in extreme cases it may require a little extra boilerplate:

    try:
        try:
            interp.exec(script)
        except RunFailedError as exc:
            if exc.__cause__ is not None:
                raise exc.__cause__
            raise  # re-raise
    except MyException:
        ...
Implementation

The implementation of the PEP has 4 parts:

- the high-level module described in this PEP (mostly a light wrapper around a low-level C extension)
- the low-level C extension module
- additions to the internal C-API needed by the low-level module
- secondary fixes/changes in the CPython runtime that facilitate the low-level module (among other benefits)

These are at various levels of completion, with more done the lower you go:

- The high-level module has been, at best, roughly implemented. However, fully implementing it will be almost trivial.
- The low-level module is mostly complete. The bulk of the implementation was merged into master in December 2018 as the "_xxsubinterpreters" module (for the sake of testing multiple interpreters functionality). Only the exception propagation implementation remains to be finished, which will not require extensive work.
- All necessary C-API work has been finished.
- All anticipated work in the runtime has been finished.

The implementation effort for PEP 554 is being tracked as part of a larger project aimed at improving multi-core support in CPython. [multi-core-project]

References

- [mp-conn] https://docs.python.org/3/library/multiprocessing.html#connection-objects
- [main-thread] https://mail.python.org/pipermail/python-ideas/2017-September/047144.html and https://mail.python.org/pipermail/python-dev/2017-September/149566.html
- [petr-c-ext] https://mail.python.org/pipermail/import-sig/2016-June/001062.html and https://mail.python.org/pipermail/python-ideas/2016-April/039748.html
- [CSP] https://en.wikipedia.org/wiki/Communicating_sequential_processes and https://github.com/futurecore/python-csp
- [async] https://mail.python.org/pipermail/python-dev/2017-September/149420.html and https://mail.python.org/pipermail/python-dev/2017-September/149585.html
- [benefits] https://mail.python.org/pipermail/python-ideas/2017-September/047122.html
- [bug-rate] https://mail.python.org/pipermail/python-ideas/2017-September/047094.html
- [c-api] https://docs.python.org/3/c-api/init.html#sub-interpreter-support
- [cache-line-ping-pong] https://mail.python.org/archives/list/[email protected]/message/3HVRFWHDMWPNR367GXBILZ4JJAUQ2STZ/
- [caveats] https://docs.python.org/3/c-api/init.html#bugs-and-caveats
- [cryptography] https://github.com/pyca/cryptography/issues/2299
- [fifo] https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Pipe, https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue, https://docs.python.org/3/library/queue.html#module-queue, http://stackless.readthedocs.io/en/2.7-slp/library/stackless/channels.html, https://golang.org/doc/effective_go.html#sharing, and http://www.jtolds.com/writing/2016/03/go-channels-are-bad-and-you-should-feel-bad/
- [gilstate] https://bugs.python.org/issue10915 and http://bugs.python.org/issue15751
- [jython] https://mail.python.org/pipermail/python-ideas/2017-May/045771.html
- [multi-core-project] https://github.com/ericsnowcurrently/multi-core-python
- [reset_globals] https://mail.python.org/pipermail/python-dev/2017-September/149545.html
- [result-object] https://mail.python.org/pipermail/python-dev/2017-September/149562.html

Copyright

This document has been placed in the public domain.
PEP: 692
Title: Using TypedDict for more precise **kwargs typing
Author: Franek Magiera <[email protected]>
Sponsor: Jelle Zijlstra <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-692-using-typeddict-for-more-precise-kwargs-typing/17314
Status: Final
Type: Standards Track
Topic: Typing
Created: 29-May-2022
Python-Version: 3.12
Post-History: 29-May-2022, 12-Jul-2022, 12-Jul-2022
Resolution: https://discuss.python.org/t/pep-692-using-typeddict-for-more-precise-kwargs-typing/17314/81

Abstract

Currently **kwargs can be type hinted as long as all of the keyword arguments specified by them are of the same type. However, that behaviour can be very limiting. Therefore, in this PEP we propose a new way to enable more precise **kwargs typing. The new approach revolves around using TypedDict to type **kwargs that comprise keyword arguments of different types.

Motivation

Currently annotating **kwargs with a type T means that the kwargs type is in fact dict[str, T]. For example:

    def foo(**kwargs: str) -> None: ...

means that all keyword arguments in foo are strings (i.e., kwargs is of type dict[str, str]). This behaviour limits the ability to type annotate **kwargs only to the cases where all of them are of the same type. However, it is often the case that keyword arguments conveyed by **kwargs have different types that are dependent on the keyword's name. In those cases type annotating **kwargs is not possible. This is especially a problem for already existing codebases where the need of refactoring the code in order to introduce proper type annotations may be considered not worth the effort. This in turn prevents the project from getting all of the benefits that type hinting can provide.

Moreover, **kwargs can be used to reduce the amount of code needed in cases when there is a top-level function that is a part of a public API and it calls a bunch of helper functions, all of which expect the same keyword arguments. Unfortunately, if those helper functions were to use **kwargs, there is no way to properly type hint them if the keyword arguments they expect are of different types. In addition, even if the keyword arguments are of the same type, there is no way to check whether the function is being called with keyword names that it actually expects.

As described in the Intended Usage section, using **kwargs is not always the best tool for the job. Despite that, it is still a widely used pattern. As a consequence, there has been a lot of discussion around supporting more precise **kwargs typing and it became a feature that would be valuable for a large part of the Python community. This is best illustrated by the mypy GitHub issue 4441, which contains a lot of real world cases that could benefit from this proposal.

One more use case worth mentioning for which **kwargs are also convenient, is when a function should accommodate optional keyword-only arguments that don't have default values. A need for a pattern like that can arise when values that are usually used as defaults to indicate no user input, such as None, can be passed in by a user and should result in a valid, non-default behavior. For example, this issue came up in the popular httpx library.
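To make that use case concrete, here is a hedged sketch using the mechanism proposed below (the Timeouts class and request function are illustrative, not taken from httpx):

    from typing import TypedDict, Unpack  # Unpack is in typing as of 3.11

    class Timeouts(TypedDict, total=False):
        connect: float | None   # None is a meaningful user-supplied value,
        read: float | None      # not merely a "no input" default

    def request(url: str, **kwargs: Unpack[Timeouts]) -> None:
        # "connect" absent  -> fall back to the library default
        # "connect" is None -> the caller explicitly disabled the timeout
        if "connect" in kwargs:
            ...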
Rationale

PEP 589 introduced the TypedDict type constructor that supports dictionary types consisting of string keys and values of potentially different types. A function's keyword arguments represented by a formal parameter that begins with double asterisk, such as **kwargs, are received as a dictionary. Additionally, such functions are often called using unpacked dictionaries to provide keyword arguments. This makes TypedDict a perfect candidate to be used for more precise **kwargs typing. In addition, with TypedDict keyword names can be taken into account during static type analysis.

However, specifying **kwargs type with a TypedDict means, as mentioned earlier, that each keyword argument specified by **kwargs is a TypedDict itself. For instance:

    class Movie(TypedDict):
        name: str
        year: int

    def foo(**kwargs: Movie) -> None: ...

means that each keyword argument in foo is itself a Movie dictionary that has a name key with a string type value and a year key with an integer type value. Therefore, in order to support specifying kwargs type as a TypedDict without breaking current behaviour, a new construct has to be introduced.

To support this use case, we propose reusing Unpack, which was initially introduced in PEP 646. There are several reasons for doing so:

- Its name is quite suitable and intuitive for the **kwargs typing use case as our intention is to "unpack" the keyword arguments from the supplied TypedDict.
- The current way of typing *args would be extended to **kwargs and those are supposed to behave similarly.
- There would be no need to introduce any new special forms.
- The use of Unpack for the purposes described in this PEP does not interfere with the use cases described in PEP 646.

Specification

With Unpack we introduce a new way of annotating **kwargs. Continuing the previous example:

    def foo(**kwargs: Unpack[Movie]) -> None: ...

would mean that the **kwargs comprise two keyword arguments specified by Movie (i.e. a name keyword of type str and a year keyword of type int). This indicates that the function should be called as follows:

    kwargs: Movie = {"name": "Life of Brian", "year": 1979}
    foo(**kwargs)                               # OK!
    foo(name="The Meaning of Life", year=1983)  # OK!

When Unpack is used, type checkers treat kwargs inside the function body as a TypedDict:

    def foo(**kwargs: Unpack[Movie]) -> None:
        assert_type(kwargs, Movie)  # OK!

Using the new annotation will not have any runtime effect - it should only be taken into account by type checkers. Any mention of errors in the following sections relates to type checker errors.

Function calls with standard dictionaries

Passing a dictionary of type dict[str, object] as a **kwargs argument to a function that has **kwargs annotated with Unpack must generate a type checker error. On the other hand, the behaviour for functions using standard, untyped dictionaries can depend on the type checker. For example:

    def foo(**kwargs: Unpack[Movie]) -> None: ...

    movie: dict[str, object] = {"name": "Life of Brian", "year": 1979}
    foo(**movie)  # WRONG! Movie is of type dict[str, object]

    typed_movie: Movie = {"name": "The Meaning of Life", "year": 1983}
    foo(**typed_movie)  # OK!

    another_movie = {"name": "Life of Brian", "year": 1979}
    foo(**another_movie)  # Depends on the type checker.
"name" is a # positional-only parameter, # so **kwargs can contain # a "name" keyword. Required and non-required keys By default all keys in a TypedDict are required. This behaviour can be overridden by setting the dictionary's total parameter as False. Moreover, PEP 655 introduced new type qualifiers - typing.Required and typing.NotRequired - that enable specifying whether a particular key is required or not: class Movie(TypedDict): title: str year: NotRequired[int] When using a TypedDict to type **kwargs all of the required and non-required keys should correspond to required and non-required function keyword parameters. Therefore, if a required key is not supported by the caller, then an error must be reported by type checkers. Assignment Assignments of a function typed with **kwargs: Unpack[Movie] and another callable type should pass type checking only if they are compatible. This can happen for the scenarios described below. Source and destination contain **kwargs Both destination and source functions have a **kwargs: Unpack[TypedDict] parameter and the destination function's TypedDict is assignable to the source function's TypedDict and the rest of the parameters are compatible: class Animal(TypedDict): name: str class Dog(Animal): breed: str def accept_animal(**kwargs: Unpack[Animal]): ... def accept_dog(**kwargs: Unpack[Dog]): ... accept_dog = accept_animal # OK! Expression of type Dog can be # assigned to a variable of type Animal. accept_animal = accept_dog # WRONG! Expression of type Animal # cannot be assigned to a variable of type Dog. Source contains **kwargs and destination doesn't The destination callable doesn't contain **kwargs, the source callable contains **kwargs: Unpack[TypedDict] and the destination function's keyword arguments are assignable to the corresponding keys in source function's TypedDict. Moreover, not required keys should correspond to optional function arguments, whereas required keys should correspond to required function arguments. Again, the rest of the parameters have to be compatible. Continuing the previous example: class Example(TypedDict): animal: Animal string: str number: NotRequired[int] def src(**kwargs: Unpack[Example]): ... def dest(*, animal: Dog, string: str, number: int = ...): ... dest = src # OK! It is worth pointing out that the destination function's parameters that are to be compatible with the keys and values from the TypedDict must be keyword only: def dest(dog: Dog, string: str, number: int = ...): ... dog: Dog = {"name": "Daisy", "breed": "labrador"} dest(dog, "some string") # OK! dest = src # Type checker error! dest(dog, "some string") # The same call fails at # runtime now because 'src' expects # keyword arguments. The reverse situation where the destination callable contains **kwargs: Unpack[TypedDict] and the source callable doesn't contain **kwargs should be disallowed. This is because, we cannot be sure that additional keyword arguments are not being passed in when an instance of a subclass had been assigned to a variable with a base class type and then unpacked in the destination callable invocation: def dest(**kwargs: Unpack[Animal]): ... def src(name: str): ... dog: Dog = {"name": "Daisy", "breed": "Labrador"} animal: Animal = dog dest = src # WRONG! dest(**animal) # Fails at runtime. Similar situation can happen even without inheritance as compatibility between TypedDicts is based on structural subtyping. 
Assignment

Assignments of a function typed with **kwargs: Unpack[Movie] and another callable type should pass type checking only if they are compatible. This can happen for the scenarios described below.

Source and destination contain **kwargs

Both destination and source functions have a **kwargs: Unpack[TypedDict] parameter and the destination function's TypedDict is assignable to the source function's TypedDict and the rest of the parameters are compatible:

    class Animal(TypedDict):
        name: str

    class Dog(Animal):
        breed: str

    def accept_animal(**kwargs: Unpack[Animal]): ...
    def accept_dog(**kwargs: Unpack[Dog]): ...

    accept_dog = accept_animal  # OK! Expression of type Dog can be
                                # assigned to a variable of type Animal.

    accept_animal = accept_dog  # WRONG! Expression of type Animal
                                # cannot be assigned to a variable of type Dog.

Source contains **kwargs and destination doesn't

The destination callable doesn't contain **kwargs, the source callable contains **kwargs: Unpack[TypedDict] and the destination function's keyword arguments are assignable to the corresponding keys in the source function's TypedDict. Moreover, not required keys should correspond to optional function arguments, whereas required keys should correspond to required function arguments. Again, the rest of the parameters have to be compatible. Continuing the previous example:

    class Example(TypedDict):
        animal: Animal
        string: str
        number: NotRequired[int]

    def src(**kwargs: Unpack[Example]): ...
    def dest(*, animal: Dog, string: str, number: int = ...): ...

    dest = src  # OK!

It is worth pointing out that the destination function's parameters that are to be compatible with the keys and values from the TypedDict must be keyword only:

    def dest(dog: Dog, string: str, number: int = ...): ...

    dog: Dog = {"name": "Daisy", "breed": "labrador"}

    dest(dog, "some string")  # OK!

    dest = src                # Type checker error!
    dest(dog, "some string")  # The same call fails at
                              # runtime now because 'src' expects
                              # keyword arguments.

The reverse situation where the destination callable contains **kwargs: Unpack[TypedDict] and the source callable doesn't contain **kwargs should be disallowed. This is because we cannot be sure that additional keyword arguments are not being passed in when an instance of a subclass had been assigned to a variable with a base class type and then unpacked in the destination callable invocation:

    def dest(**kwargs: Unpack[Animal]): ...
    def src(name: str): ...

    dog: Dog = {"name": "Daisy", "breed": "Labrador"}
    animal: Animal = dog

    dest = src      # WRONG!
    dest(**animal)  # Fails at runtime.

A similar situation can happen even without inheritance, as compatibility between TypedDicts is based on structural subtyping.

Source contains untyped **kwargs

The destination callable contains **kwargs: Unpack[TypedDict] and the source callable contains untyped **kwargs:

    def src(**kwargs): ...
    def dest(**kwargs: Unpack[Movie]): ...

    dest = src  # OK!

Source contains traditionally typed **kwargs: T

The destination callable contains **kwargs: Unpack[TypedDict], the source callable contains traditionally typed **kwargs: T and each of the destination function TypedDict's fields is assignable to a variable of type T:

    class Vehicle: ...
    class Car(Vehicle): ...
    class Motorcycle(Vehicle): ...

    class Vehicles(TypedDict):
        car: Car
        moto: Motorcycle

    def dest(**kwargs: Unpack[Vehicles]): ...
    def src(**kwargs: Vehicle): ...

    dest = src  # OK!

On the other hand, if the destination callable contains either untyped or traditionally typed **kwargs: T and the source callable is typed using **kwargs: Unpack[TypedDict] then an error should be generated, because traditionally typed **kwargs aren't checked for keyword names.

To summarize, function parameters should behave contravariantly and function return types should behave covariantly.

Passing kwargs inside a function to another function

A previous point mentions the problem of possibly passing additional keyword arguments by assigning a subclass instance to a variable that has a base class type. Let's consider the following example:

    class Animal(TypedDict):
        name: str

    class Dog(Animal):
        breed: str

    def takes_name(name: str): ...

    dog: Dog = {"name": "Daisy", "breed": "Labrador"}
    animal: Animal = dog

    def foo(**kwargs: Unpack[Animal]):
        print(kwargs["name"].capitalize())

    def bar(**kwargs: Unpack[Animal]):
        takes_name(**kwargs)

    def baz(animal: Animal):
        takes_name(**animal)

    def spam(**kwargs: Unpack[Animal]):
        baz(kwargs)

    foo(**animal)   # OK! foo only expects and uses keywords of 'Animal'.

    bar(**animal)   # WRONG! This will fail at runtime because 'breed' keyword
                    # will be passed to 'takes_name' as well.

    spam(**animal)  # WRONG! Again, 'breed' keyword will be eventually passed
                    # to 'takes_name'.

In the example above, the call to foo will not cause any issues at runtime. Even though foo expects kwargs of type Animal, it doesn't matter if it receives additional arguments because it only reads and uses what it needs, completely ignoring any additional values. The calls to bar and spam will fail because an unexpected keyword argument will be passed to the takes_name function.

Therefore, kwargs hinted with an unpacked TypedDict can only be passed to another function if the function they are being passed to has **kwargs in its signature as well, because then additional keywords would not cause errors at runtime during function invocation. Otherwise, the type checker should generate an error (see the sketch following this section).

In cases similar to the bar function above, the problem could be worked around by explicitly dereferencing desired fields and using them as arguments to perform the function call:

    def bar(**kwargs: Unpack[Animal]):
        name = kwargs["name"]
        takes_name(name)

Using Unpack with types other than TypedDict

As described in the Rationale section, TypedDict is the most natural candidate for typing **kwargs. Therefore, in the context of typing **kwargs, using Unpack with types other than TypedDict should not be allowed and type checkers should generate errors in such cases.
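Tying together the forwarding rule from "Passing kwargs inside a function to another function" above: forwarding type-checks when the callee also takes **kwargs of the same (or an assignable) TypedDict. A minimal sketch, with illustrative names:

    from typing import TypedDict, Unpack

    class Animal(TypedDict):
        name: str

    def sink(**kwargs: Unpack[Animal]) -> None: ...

    def forward(**kwargs: Unpack[Animal]) -> None:
        sink(**kwargs)  # OK: the callee also accepts **kwargs of this TypedDict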
Changes to Unpack

Currently using Unpack in the context of typing is interchangeable with using the asterisk syntax:

    >>> Unpack[Movie]
    *<class '__main__.Movie'>

Therefore, in order to be compatible with the new use case, Unpack's repr should be changed to simply Unpack[T].

Intended Usage

The intended use cases for this proposal are described in the Motivation section. In summary, more precise **kwargs typing can bring benefits to already existing codebases that decided to use **kwargs initially, but now are mature enough to use a stricter contract via type hints. Using **kwargs can also help in reducing code duplication and the amount of copy-pasting needed when there is a bunch of functions that require the same set of keyword arguments. Finally, **kwargs are useful for cases when a function needs to facilitate optional keyword arguments that don't have obvious default values.

However, it has to be pointed out that in some cases there are better tools for the job than using TypedDict to type **kwargs as proposed in this PEP. For example, when writing new code, if all the keyword arguments are required or have default values then writing everything explicitly is better than using **kwargs and a TypedDict:

    def foo(name: str, year: int): ...     # Preferred way.
    def foo(**kwargs: Unpack[Movie]): ...

Similarly, when type hinting third party libraries via stubs it is again better to state the function signature explicitly - this is the only way to type such a function if it has default arguments. Another issue that may arise in this case when trying to type hint the function with a TypedDict is that some standard function parameters may be treated as keyword only:

    def foo(name, year): ...               # Function in a third party library.

    def foo(**kwargs: Unpack[Movie]): ...  # Function signature in a stub file.

    foo("Life of Brian", 1979)             # This call would now fail type
                                           # checking, even though it is fine
                                           # at runtime.

    foo(name="Life of Brian", year=1979)   # This would be the only way to call
                                           # the function now that passes type
                                           # checking.

Therefore, in this case it is again preferred to type hint such a function explicitly as:

    def foo(name: str, year: int): ...

Also, for the benefit of IDEs and documentation pages, functions that are part of the public API should prefer explicit keyword parameters whenever possible.

How to Teach This

This PEP could be linked in the typing module's documentation. Moreover, a new section on using Unpack could be added to the aforementioned docs. Similar sections could be also added to the mypy documentation and the typing RTD documentation.

Reference Implementation

The mypy type checker already supports more precise **kwargs typing using Unpack. The Pyright type checker also provides provisional support for this feature.

Rejected Ideas

TypedDict unions

It is possible to create unions of typed dictionaries. However, supporting typing **kwargs with a union of typed dicts would greatly increase the complexity of the implementation of this PEP and there seems to be no compelling use case to justify the support for this. Therefore, using unions of typed dictionaries to type **kwargs as described in the context of this PEP can result in an error:

    class Book(TypedDict):
        genre: str
        pages: int

    TypedDictUnion = Movie | Book

    def foo(**kwargs: Unpack[TypedDictUnion]) -> None: ...  # WRONG! Unsupported use
                                                            # of a union of
                                                            # TypedDicts to type
                                                            # **kwargs

Instead, a function that expects a union of TypedDicts can be overloaded:

    @overload
    def foo(**kwargs: Unpack[Movie]): ...

    @overload
    def foo(**kwargs: Unpack[Book]): ...
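For completeness, a sketch of how the overloaded alternative could be used at runtime; the undecorated implementation and the calls are ours, added for illustration:

    from typing import TypedDict, Unpack, overload

    class Movie(TypedDict):
        name: str
        year: int

    class Book(TypedDict):
        genre: str
        pages: int

    @overload
    def foo(**kwargs: Unpack[Movie]) -> None: ...
    @overload
    def foo(**kwargs: Unpack[Book]) -> None: ...
    def foo(**kwargs) -> None:  # the implementation accepts either shape
        print(sorted(kwargs))

    foo(name="Life of Brian", year=1979)  # matches the Movie overload
    foo(genre="comedy", pages=123)        # matches the Book overload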
Changing the meaning of **kwargs annotations

One way to achieve the purpose of this PEP would be to change the meaning of **kwargs annotations, so that the annotations would apply to the entire **kwargs dict, not to individual elements. For consistency, we would have to make an analogous change to *args annotations.

This idea was discussed in a meeting of the typing community, and the consensus was that the change would not be worth the cost. There is no clear migration path, the current meaning of *args and **kwargs annotations is well-established in the ecosystem, and type checkers would have to introduce new errors for code that is currently legal.

Introducing a new syntax

In the previous versions of this PEP, using a double asterisk syntax was proposed to support more precise **kwargs typing. Using this syntax, functions could be annotated as follows:

    def foo(**kwargs: **Movie): ...

Which would have the same meaning as:

    def foo(**kwargs: Unpack[Movie]): ...

This greatly increased the scope of the PEP, as it would require a grammar change and adding a new dunder for the Unpack special form. At the same time, the justification for introducing a new syntax was not strong enough and became a blocker for the whole PEP. Therefore, we decided to abandon the idea of introducing a new syntax as a part of this PEP and may propose it again in a separate one.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 8104
Title: 2023 Term Steering Council election
Author: Ee Durbin <[email protected]>
Sponsor: Brett Cannon <[email protected]>
Status: Final
Type: Informational
Topic: Governance
Created: 08-Nov-2022

Abstract

This document describes the schedule and other details of the December 2022 election for the Python steering council, as specified in PEP 13. This is the steering council election for the 2023 term (i.e. Python 3.12).

Election Administration

The steering council appointed the Python Software Foundation Director of Infrastructure, Ee Durbin, to administer the election.

Schedule

There will be a two-week nomination period, followed by a two-week vote.

The nomination period was: November 14, 2022 through November 28, 2022 AoE[1].

The voting period was: December 1, 2022 through December 14, 2022 AoE[2].

Candidates

Candidates must be nominated by a core team member. If the candidate is a core team member, they may nominate themselves.

Nominees (in alphabetical order):

- Brett Cannon
- Emily Morehouse
- Dong-hee Na
- Pablo Galindo Salgado
- Gregory P. Smith
- Victor Stinner
- Petr Viktorin
- Thomas Wouters

Withdrawn nominations:

- None

Voter Roll

All active Python core team members are eligible to vote. Active status is determined as described in PEP 13 and implemented via the software at python/voters [3]. Ballots will be distributed based on the Python Voter Roll[4] for this election.

While this file is not public as it contains private email addresses, the Complete Voter Roll by name will be made available when the roll is created.

Election Implementation

The election will be conducted using the Helios Voting Service.

Configuration

Short name: 2023-python-steering-council
Name: 2023 Python Steering Council Election
Description: Election for the Python steering council, as specified in PEP 13. This is steering council election for the 2023 term.
type: Election
Use voter aliases: [X]
Randomize answer order: [X]
Private: [X]
Help Email Address: [email protected]
Voting starts at: December 1, 2022 12:00 UTC
Voting ends at: December 15, 2022 12:00 UTC

This will create an election in which:

- Voting is not open to the public, only those on the Voter Roll may participate. Ballots will be emailed when voting starts.
- Candidates are presented in random order, to help avoid bias.
- Voter identities and ballots are protected against cryptographic advances.

Questions

Question 1

Select between 0 and - (approval) answers. Result Type: absolute

Question: Select candidates for the Python Steering Council

Answer #1 - #N: Candidates from Candidates_ Section

Results

Of 85 eligible voters, 66 cast ballots.

The top five vote-getters are:

- Pablo Galindo Salgado
- Gregory P. Smith
- Emily Morehouse
- Brett Cannon
- Thomas Wouters

No conflicts of interest as defined in PEP 13 were observed.

The full vote counts are as follows:

    Candidate               Votes Received
    ----------------------  --------------
    Pablo Galindo Salgado   61
    Gregory P. Smith        48
    Emily Morehouse         47
    Brett Cannon            42
    Thomas Wouters          39
    Petr Viktorin           36
    Victor Stinner          34
    Dong-hee Na             29

Copyright

This document has been placed in the public domain.
Complete Voter Roll Active Python core developers Alex Gaynor Alex Waygood Ammar Askar Andrew Svetlov Antoine Pitrou Barry Warsaw Batuhan Taskaya Benjamin Peterson Berker Peksağ Brandt Bucher Brett Cannon Brian Curtin Brian Quinlan Carol Willing Cheryl Sabella Chris Jerdonek Chris Withers Christian Heimes Dennis Sweeney Dino Viehland Dong-hee Na Emily Morehouse Éric Araujo Eric Snow Eric V. Smith Erlend Egeberg Aasland Ethan Furman Ezio Melotti Facundo Batista Filipe Laíns Fred Drake Georg Brandl Giampaolo Rodolà Gregory P. Smith Guido van Rossum Hugo van Kemenade Hynek Schlawack Inada Naoki Irit Katriel Ivan Levkivskyi Jason R. Coombs Jelle Zijlstra Jeremy Kloth Jesús Cea Joannah Nanjekye Julien Palard Karthikeyan Singaravelan Ken Jin Kumar Aditya Kurt B. Kaiser Kushal Das Kyle Stanley Larry Hastings Łukasz Langa Lysandros Nikolaou Marc-André Lemburg Mariatta Mark Dickinson Mark Shannon Nathaniel J. Smith Ned Deily Neil Schemenauer Alyssa Coghlan Pablo Galindo Paul Ganssle Paul Moore Petr Viktorin R. David Murray Raymond Hettinger Ronald Oussoren Senthil Kumaran Serhiy Storchaka Stefan Behnel Stéphane Wirtel Steve Dower Steven D'Aprano Tal Einat Terry Jan Reedy Thomas Wouters Tim Golden Tim Peters Victor Stinner Vinay Sajip Yury Selivanov Zachary Ware [1] AoE: Anywhere on Earth. [2] AoE: Anywhere on Earth. [3] This repository is private and accessible only to Python Core Developers, administrators, and Python Software Foundation Staff as it contains personal email addresses. [4] This repository is private and accessible only to Python Core Developers, administrators, and Python Software Foundation Staff as it contains personal email addresses.
PEP: 668 Title: Marking Python base environments as “externally managed” Author: Geoffrey Thomas <[email protected]>, Matthias Klose <[email protected]>, Filipe Laíns <[email protected]>, Donald Stufft <[email protected]>, Tzu-ping Chung <[email protected]>, Stefano Rivera <[email protected]>, Elana Hashman <[email protected]>, Pradyun Gedam <[email protected]> PEP-Delegate: Paul Moore <[email protected]> Discussions-To: https://discuss.python.org/t/10302 Status: Accepted Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 18-May-2021 Post-History: 28-May-2021 Resolution: https://discuss.python.org/t/10302/44 externally-managed-environments Abstract A long-standing practical problem for Python users has been conflicts between OS package managers and Python-specific package management tools like pip. These conflicts include both Python-level API incompatibilities and conflicts over file ownership. Historically, Python-specific package management tools have defaulted to installing packages into an implicit global context. With the standardization and popularity of virtual environments, a better solution for most (but not all) use cases is to use Python-specific package management tools only within a virtual environment. This PEP proposes a mechanism for a Python installation to communicate to tools like pip that its global package installation context is managed by some means external to Python, such as an OS package manager. It specifies that Python-specific package management tools should neither install nor remove packages into the interpreter's global context, by default, and should instead guide the end user towards using a virtual environment. It also standardizes an interpretation of the sysconfig schemes so that, if a Python-specific package manager is about to install a package in an interpreter-wide context, it can do so in a manner that will avoid conflicting with the external package manager and reduces the risk of breaking software shipped by the external package manager. Terminology A few terms used in this PEP have multiple meanings in the contexts that it spans. For clarity, this PEP uses the following terms in specific ways: distro Short for "distribution," a collection of various sorts of software, ideally designed to work properly together, including (in contexts relevant to this document) the Python interpreter itself, software written in Python, and software written in other languages. That is, this is the sense used in phrases such as "Linux distro" or "Berkeley Software Distribution." A distro can be an operating system (OS) of its own, such as Debian, Fedora, or FreeBSD. It can also be an overlay distribution that installs on top of an existing OS, such as Homebrew or MacPorts. This document uses the short term "distro," because the term "distribution" has another meaning in Python packaging contexts: a source or binary distribution package of a single piece of Python language software, that is, in the sense of setuptools.dist.Distribution or "sdist". To avoid confusion, this document does not use the plain term "distribution" at all. In the Python packaging sense, it uses the full phrase "distribution package" or just "package" (see below). The provider of a distro - the team or company that collects and publishes the software and makes any needed modifications - is its distributor. package A unit of software that can be installed and used within Python. 
That is, this refers to what Python-specific packaging tools tend to call a "distribution package" or simply a "distribution"; the colloquial abbreviation "package" is used in the sense of the Python Package Index. This document does not use "package" in the sense of an importable name that contains Python modules, though in many cases, a distribution package consists of a single importable package of the same name. This document generally does not use the term "package" to refer to units of installation by a distro's package manager (such as .deb or .rpm files). When needed, it uses phrasing such as "a distro's package." (Again, in many cases, a Python package is shipped inside a distro's package named something like python- plus the Python package name.) Python-specific package manager A tool for installing, upgrading, and/or removing Python packages in a manner that conforms to Python packaging standards (such as PEP 376 and PEP 427). The most popular Python-specific package manager is pip[1]; other examples include the old Easy Install command[2] as well as direct usage of a setup.py command. (Conda is a bit of a special case, as the conda command can install much more than just Python packages, making it more like a distro package manager in some senses. Since the conda command generally only operates on Conda-created environments, most of the concerns in this document do not apply to conda when acting as a Python-specific package manager.) distro package manager A tool for installing, upgrading, and/or removing a distro's packages in an installed instance of that distro, which is capable of installing Python packages as well as non-Python packages, and therefore generally has its own database of installed software unrelated to PEP 376. Examples include apt, dpkg, dnf, rpm, pacman, and brew. The salient feature is that if a package was installed by a distro package manager, removing or upgrading it in a way that would satisfy a Python-specific package manager will generally leave a distro package manager in an inconsistent state. This document also uses phrases like "external package manager" or "system's package manager" to refer to a distro package manager in certain contexts. shadow To shadow an installed Python package is to cause some other package to be preferred for imports without removing any files from the shadowed package. This requires multiple entries on sys.path: if package A 2.0 installs module a.py in one sys.path entry, and package A 1.0 installs module a.py in a later sys.path entry, then import a returns the module from the former, and we say that A 2.0 shadows A 1.0. Motivation Thanks to Python's immense popularity, software distros (by which we mean Linux and other OS distros as well as overlay distros like Homebrew and MacPorts) generally ship Python for two purposes: as a software package to be used in its own right by end users, and as a language dependency for other software in the distro. For example, Fedora and Debian (and their downstream distros, as well as many others) ship a /usr/bin/python3 binary which provides the python3 command available to end users as well as the #!/usr/bin/python3 shebang for Python-language software included in the distro. Because there are no official binary releases of Python for Linux/UNIX, almost all Python end users on these OSes use the Python interpreter built and shipped with their distro. 
The python3 executable available to the users of the distro and the python3 executable available as a dependency for other software in the distro are typically the same binary. This means that if an end user installs a Python package using a tool like pip outside the context of a virtual environment, that package is visible to Python-language software shipped by the distro. If the newly-installed package (or one of its dependencies) is a newer, backwards-incompatible version of a package that was installed through the distro, it may break software shipped by the distro. This may pose a critical problem for the integrity of distros, which often have package-management tools that are themselves written in Python. For example, it's possible to unintentionally break Fedora's dnf command with a pip install command, making it hard to recover. This applies both to system-wide installs (sudo pip install) as well as user home directory installs (pip install --user), since packages in either location show up on the sys.path of /usr/bin/python3. There is a worse problem with system-wide installs: if you attempt to recover from this situation with sudo pip uninstall, you may end up removing packages that are shipped by the system's package manager. In fact, this can even happen if you simply upgrade a package - pip will try to remove the old version of the package, as shipped by the OS. At this point it may not be possible to recover the system to a consistent state using just the software remaining on the system. Over the past many years, a consensus has emerged that the best way to install Python libraries or applications (when not using a distro's package) is to use a virtual environment. This approach was popularized by the PyPA virtualenv project, and a simple version of that approach is now available in the Python standard library as venv. Installing a Python package into a virtualenv prevents it from being visible to the unqualified /usr/bin/python3 interpreter and prevents breaking system software. In some cases, however, it's useful and intentional to install a Python package from outside of the distro that influences the behavior of distro-shipped commands. This is common in the case of software like Sphinx or Ansible which have a mechanism for writing Python-language extensions. A user may want to use their distro's version of the base software (for reasons of paid support or security updates) but install a small extension from PyPI, and they'd want that extension to be importable by the software in their base system. While this continues to carry the risk of installing a newer version of a dependency than the operating system expects or otherwise negatively affecting the behavior of an application, it does not need to carry the risk of removing files from the operating system. A tool like pip should be able to install packages in some directory on the default sys.path, if specifically requested, without deleting files owned by the system's package manager. Therefore, this PEP proposes two things. First, it proposes a way for distributors of a Python interpreter to mark that interpreter as having its packages managed by means external to Python, such that Python-specific tools like pip should not change the installed packages in the interpreter's global sys.path in any way (add, upgrade/downgrade, or remove) unless specifically overridden. It also provides a means for the distributor to indicate how to use a virtual environment as an alternative. 
This is an opt-in mechanism: by default, the Python interpreter compiled from upstream sources will not be so marked, and so running pip install with a self-compiled interpreter, or with a distro that has not explicitly marked its interpreter, will work as it always has worked. Second, it sets the rule that when installing packages to an interpreter's global context (either to an unmarked interpreter, or if overriding the marking), Python-specific package managers should modify or delete files only within the directories of the sysconfig scheme in which they would create files. This permits a distributor of a Python interpreter to set up two directories, one for its own managed packages, and one for unmanaged packages installed by the end user, and ensure that installing unmanaged packages will not delete (or overwrite) files owned by the external package manager. Rationale As described in detail in the next section, the first behavior change involves creating a marker file named EXTERNALLY-MANAGED, whose presence indicates that non-virtual-environment package installations are managed by some means external to Python, such as a distro's package manager. This file is specified to live in the stdlib directory in the default sysconfig scheme, which marks the interpreter / installation as a whole, not a particular location on sys.path. The reason for this is that, as identified above, there are two related problems that risk breaking an externally-managed Python: you can install an incompatible new version of a package system-wide (e.g., with sudo pip install), and you can install one in your user account alone, but in a location that is on the standard Python command's sys.path (e.g., with pip install --user). If the marker file were in the system-wide site-packages directory, it would not clearly apply to the second case. The Alternatives section has further discussion of possible locations. The second behavior change takes advantage of the existing sysconfig setup in distros that have already encountered this class of problem, and specifically addresses the problem of a Python-specific package manager deleting or overwriting files that are owned by an external package manager. Use cases The changed behavior in this PEP is intended to "do the right thing" for as many use cases as possible. In this section, we consider the changes specified by this PEP for several representative use cases / contexts. Specifically, we ask about the two behaviors that could be changed by this PEP: 1. Will a Python-specific installer tool like pip install permit installations by default, after implementation of this PEP? 2. If you do run such a tool, should it be willing to delete packages shipped by the external (non-Python-specific) package manager for that context, such as a distro package manager? (For simplicity, this section discusses pip as the Python-specific installer tool, though the analysis should apply equally to any other Python-specific package management tool.) 
This table summarizes the use cases discussed in detail below:

    Case  Description                                         pip install permitted         Deleting externally-installed packages permitted
    ----  --------------------------------------------------  ----------------------------  ------------------------------------------------
    1     Unpatched CPython                                   Currently yes; stays yes      Currently yes; stays yes
    2     Distro /usr/bin/python3                             Currently yes; becomes no*    Currently yes (except on Debian); becomes no
    3     Distro Python in venv                               Currently yes; stays yes      There are no externally-installed packages
    4     Distro Python in venv with --system-site-packages   Currently yes; stays yes      Currently no; stays no
    5     Distro Python in Docker                             Currently yes; becomes no*    Currently yes; becomes no
    6     Conda environment                                   Currently yes; stays yes      Currently yes; stays yes
    7     Dev-facing distro                                   Currently yes; becomes no*    Currently often yes; becomes no**
    8     Distro building packages                            Currently yes; can stay yes   Currently yes; becomes no
    9     PYTHONHOME copied from a distro Python stdlib       Currently yes; becomes no     Currently yes; becomes no
    10    PYTHONHOME copied from upstream Python stdlib       Currently yes; stays yes      Currently yes; stays yes

    *   assuming a marker file is added
    **  assuming sysconfig is configured as needed

In more detail, the use cases above are:

1. A standard unpatched CPython, without any special configuration of or patches to sysconfig and without a marker file. This PEP does not change its behavior.

   Such a CPython should (regardless of this PEP) not be installed in a way that overlaps any distro-installed Python on the same system. For instance, on an OS that ships Python in /usr/bin, you should not install a custom CPython built with ./configure --prefix=/usr, or it will overwrite some files from the distro and the distro will eventually overwrite some files from your installation. Instead, your installation should be in a separate directory (perhaps /usr/local, /opt, or your home directory).

   Therefore, we can assume that such a CPython has its own stdlib directory and its own sysconfig schemes that do not overlap any distro-installed Python. So any OS-installed packages are not visible or relevant here. If there is a concept of "externally-installed" packages in this case, it's something outside the OS and generally managed by whoever built and installed this CPython.
Because the installer chose not to add a marker file or modify sysconfig schemes, they're choosing the current behavior, and pip install can remove any packages available in this CPython. 2. A distro's /usr/bin/python3, either when running pip install as root or pip install --user, following our Recommendations for distros. These recommendations include shipping a marker file in the stdlib directory, to prevent pip install by default, and placing distro-shipped packages in a location other than the default sysconfig scheme, so that pip as root does not write to that location. Many distros (including Debian, Fedora, and their derivatives) are already doing the latter. On Debian and derivatives, pip install does not currently delete distro-installed packages, because Debian carries a patch to pip to prevent this. So, for those distros, this PEP is not a behavior change; it simply standardizes that behavior in a way that is no longer Debian-specific and can be included into upstream pip. (We have seen user reports of externally-installed packages being deleted on Debian or a derivative. We suspect this is because the user has previously run sudo pip install --upgrade pip and therefore now has a version of /usr/bin/pip without the Debian patch; standardizing this behavior in upstream package installers would address this problem.) 3. A distro Python when used inside a virtual environment (either from venv or virtualenv). Inside a virtual environment, all packages are owned by that environment. Even when pip, setuptools, etc. are installed into the environment, they are and should be managed by tools specific to that environment; they are not system-managed. 4. A distro Python when used inside a virtual environment with --system-site-packages. This is like the previous case, but worth calling out explicitly, because anything on the global sys.path is visible. Currently, the answer to "Will pip delete externally-installed packages" is no, because pip has a special case for running in a virtual environment and attempting to delete packages outside it. After this PEP, the answer remains no, but the reasoning becomes more general: system site packages will be outside any of the sysconfig schemes used for package management in the environment. 5. A distro Python when used in a single-application container image (e.g., a Docker container). In this use case, the risk of breaking system software is lower, since generally only a single application runs in the container, and the impact is lower, since you can rebuild the container and you don't have to struggle to recover a running machine. There are also a large number of existing Dockerfiles with an unqualified RUN pip install ... statement, etc., and it would be good not to break those. So, builders of base container images may want to ensure that the marker file is not present, even if the underlying OS ships one by default. There is a small behavior change: currently, pip run as root will delete externally-installed packages, but after this PEP it will not. We don't propose a way to override this. However, since the base image is generally minimal, there shouldn't be much of a use case for simply uninstalling packages (especially without using the distro's own tools). The common case is when pip wants to upgrade a package, which previously would have deleted the old version (except on Debian). 
After this change, the old version will still be on disk, but pip will still shadow externally-installed packages, and we believe this to be sufficient for this not to be a breaking change in practice - a Python import statement will still get you the newly-installed package. If it becomes necessary to have a way to do this, we suggest that the distro should document a way for the installer tool to access the sysconfig scheme used by the distro itself. See the Recommendations for distros section for more discussion. It is the view of the authors of this PEP that it's still a good idea to use virtual environments with distro-installed Python interpreters, even in single-application container images. Even though they run a single application, that application may run commands from the OS that are implemented in Python, and if you've installed or upgraded the distro-shipped Python packages using Python-specific tools, those commands may break. 6. Conda specifically supports the use of non-conda tools like pip to install software not available in the Conda repositories. In this context, Conda acts as the external package manager / distro and pip as the Python-specific one. In some sense, this is similar to the first case, since Conda provides its own installation of the Python interpreter. We don't believe this PEP requires any changes to Conda, and versions of pip that have implemented the changes in this PEP will continue to behave as they currently do inside Conda environments. (That said, it may be worth considering whether to use separate sysconfig schemes for pip-installed and Conda-installed software, for the same reasons it's a good idea for other distros.) 7. By a "developer-facing distro," we mean a specific type of distro where direct users of Python or other languages in the distro are expected or encouraged to make changes to the distro itself if they wish to add libraries. Common examples include private "monorepos" at software development companies, where a single repository builds both third-party and in-house software, and the direct users of the distro's Python interpreter are generally software developers writing said in-house software. User-level package managers like Nixpkgs may also count, because they encourage users of Nix who are Python developers to package their software for Nix. In these cases, the distro may want to respond to an attempted pip install with guidance encouraging use of the distro's own facilities for adding new packages, along with a link to documentation. If the distro supports/encourages creating a virtual environment from the distro's Python interpreter, there may also be custom instructions for how to properly set up a virtual environment (as for example Nixpkgs does). 8. When building distro Python packages for a distro Python (case 2), it may be useful to have pip install be usable as part of the distro's package build process. (Consider, for instance, building a python-xyz RPM by using pip install . inside an sdist / source tarball for xyz.) The distro may also want to use a more targeted but still Python-specific installation tool such as installer. For this case, the build process will need to find some way to suppress the marker file to allow pip install to work, and will probably need to point the Python-specific tool at the distro's sysconfig scheme instead of the shipped default. See the Recommendations for distros section for more discussion on how to implement this. 
As a result of this PEP, pip will no longer be able to remove packages already on the system. However, this behavior change is fine because a package build process should not (and generally cannot) include instructions to delete some other files on the system; it can only package up its own files. 9. A distro Python used with PYTHONHOME to set up an alternative Python environment (as opposed to a virtual environment), where PYTHONHOME is set to some directory copied directly from the distro Python (e.g., cp -a /usr/lib/python3.x pyhome/lib). Assuming there are no modifications, then the behavior is just like the underlying distro Python (case 2). So there are behavior changes - you can no longer pip install by default, and if you override it, it will no longer delete externally-installed packages (i.e., Python packages that were copied from the OS and live in the OS-managed sys.path entry). This behavior change seems to be defensible, in that if your PYTHONHOME is a straight copy of the distro's Python, it should behave like the distro's Python. 10. A distro Python (or any Python interpreter) used with a PYTHONHOME taken from a compatible unmodified upstream Python. Because the behavior changes in this PEP are keyed off of files in the standard library (the marker file in stdlib and the behavior of the sysconfig module), the behavior is just like an unmodified upstream CPython (case 1). Specification Marking an interpreter as using an external package manager Before a Python-specific package installer (that is, a tool such as pip - not an external tool such as apt) installs a package into a certain Python context, it should make the following checks by default: 1. Is it running outside of a virtual environment? It can determine this by whether sys.prefix == sys.base_prefix (but see Backwards Compatibility). 2. Is there an EXTERNALLY-MANAGED file in the directory identified by sysconfig.get_path("stdlib", sysconfig.get_default_scheme())? If both of these conditions are true, the installer should exit with an error message indicating that package installations into this Python interpreter's directories are disabled outside of a virtual environment. The installer should have a way for the user to override these rules, such as a command-line flag --break-system-packages. This option should not be enabled by default and should carry some connotation that its use is risky. The EXTERNALLY-MANAGED file is an INI-style metadata file intended to be parsable by the standard library configparser module. If the file can be parsed by configparser.ConfigParser(interpolation=None) using the UTF-8 encoding, and it contains a section [externally-managed], then the installer should look for an error message specified in the file and output it as part of its error. If the first element of the tuple returned by locale.getlocale(locale.LC_MESSAGES), i.e., the language code, is not None, it should look for the error message as the value of a key named Error- followed by the language code. If that key does not exist, and if the language code contains underscore or hyphen, it should look for a key named Error- followed by the portion of the language code before the underscore or hyphen. If it cannot find either of those, or if the language code is None, it should look for a key simply named Error.
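A minimal, non-normative sketch of these checks in Python follows. The function names are illustrative, not part of the specification, and the sketch folds in the fallback to the installer's own pre-defined message that is described just below. Note that sysconfig.get_default_scheme() only exists as a public function from Python 3.10 onward.

    import configparser
    import locale
    import os
    import sys
    import sysconfig

    def externally_managed_path():
        # The marker file lives in the stdlib directory of the default scheme.
        stdlib = sysconfig.get_path("stdlib", sysconfig.get_default_scheme())
        return os.path.join(stdlib, "EXTERNALLY-MANAGED")

    def installation_is_blocked():
        # Condition 1: running outside a virtual environment (see Backwards
        # Compatibility for old virtualenv versions that predate pyvenv.cfg).
        outside_venv = sys.prefix == sys.base_prefix
        # Condition 2: the EXTERNALLY-MANAGED marker file exists.
        return outside_venv and os.path.exists(externally_managed_path())

    def error_message(installer_default):
        parser = configparser.ConfigParser(interpolation=None)
        try:
            with open(externally_managed_path(), encoding="utf-8") as f:
                parser.read_file(f)
            section = parser["externally-managed"]
        except (OSError, configparser.Error, KeyError):
            # Unreadable or unparsable file: fall back to the installer's
            # own pre-defined message.
            return installer_default
        lang = locale.getlocale(locale.LC_MESSAGES)[0]
        keys = []
        if lang is not None:
            keys.append("Error-" + lang)
            stem = lang.split("_")[0].split("-")[0]
            if stem != lang:
                keys.append("Error-" + stem)
        keys.append("Error")
        for key in keys:
            if key in section:
                return section[key]
        return installer_default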
If the installer cannot find an error message in the file (either because the file cannot be parsed or because no suitable error key exists), then the installer should just use a pre-defined error message of its own, which should suggest that the user create a virtual environment to install packages. Software distributors who have a non-Python-specific package manager that manages libraries in the sys.path of their Python package should, in general, ship an EXTERNALLY-MANAGED file in their standard library directory. For instance, Debian may ship a file in /usr/lib/python3.9/EXTERNALLY-MANAGED consisting of something like

    [externally-managed]
    Error=To install Python packages system-wide, try apt install
     python3-xyz, where xyz is the package you are trying to
     install.

     If you wish to install a non-Debian-packaged Python package,
     create a virtual environment using python3 -m venv path/to/venv.
     Then use path/to/venv/bin/python and path/to/venv/bin/pip. Make
     sure you have python3-full installed.

     If you wish to install a non-Debian packaged Python application,
     it may be easiest to use pipx install xyz, which will manage a
     virtual environment for you. Make sure you have pipx installed.

     See /usr/share/doc/python3.9/README.venv for more information.

which provides useful and distro-relevant information to a user trying to install a package. Optionally, translations can be provided in the same file:

    Error-de_DE=Wenn ist das Nunstück git und Slotermeyer?
     Ja! Beiherhund das Oder die Virtualenvironment gersput!

In certain contexts, such as single-application container images that aren't updated after creation, a distributor may choose not to ship an EXTERNALLY-MANAGED file, so that users can install whatever they like (as they can today) without having to manually override this rule. Writing to only the target sysconfig scheme Usually, a Python package installer installs to directories in a scheme returned by the sysconfig standard library package. Ordinarily, this is the scheme returned by sysconfig.get_default_scheme(), but based on configuration (e.g. pip install --user), it may use a different scheme. Whenever the installer is installing to a sysconfig scheme, this PEP specifies that the installer should never modify or delete files outside of that scheme. For instance, if it's upgrading a package, and the package is already installed in a directory outside that scheme (perhaps in a directory from another scheme), it should leave the existing files alone. If the installer does end up shadowing an existing installation during an upgrade, we recommend that it produce a warning at the end of its run. If the installer is installing to a location outside of a sysconfig scheme (e.g., pip install --target), then this subsection does not apply. Recommendations for distros This section is non-normative. It provides best practices we believe distros should follow unless they have a specific reason otherwise. Mark the installation as externally managed Distros should create an EXTERNALLY-MANAGED file in their stdlib directory. Guide users towards virtual environments The file should contain a useful and distro-relevant error message indicating both how to install system-wide packages via the distro's package manager and how to set up a virtual environment. If your distro is often used by users in a state where the python3 command is available (and especially where pip or get-pip is available) but python3 -m venv does not work, the message should indicate clearly how to make python3 -m venv work properly.
Consider packaging pipx, a tool for installing Python-language applications, and suggesting it in the error. pipx automatically creates a virtual environment for that application alone, which is a much better default for end users who want to install some Python-language software (which isn't available in the distro) but are not themselves Python users. Packaging pipx in the distro avoids the irony of instructing users to pip install --user --break-system-packages pipx to avoid breaking system packages. Consider arranging things so your distro's package / environment for Python for end users (e.g., python3 on Fedora or python3-full on Debian) depends on pipx. Keep the marker file in container images Distros that produce official images for single-application containers (e.g., Docker container images) should keep the EXTERNALLY-MANAGED file, preferably in a way that makes it not go away if a user of that image installs package updates inside their image (think RUN apt-get dist-upgrade). Create separate distro and local directories Distros should place two separate paths on the system interpreter's sys.path, one for distro-installed packages and one for packages installed by the local system administrator, and configure sysconfig.get_default_scheme() to point at the latter path. This ensures that tools like pip will not modify distro-installed packages. The path for the local system administrator should come before the distro path on sys.path so that local installs take preference over distro packages. For example, Fedora and Debian (and their derivatives) both implement this split by using /usr/local for locally-installed packages and /usr for distro-installed packages. Fedora uses /usr/local/lib/python3.x/site-packages vs. /usr/lib/python3.x/site-packages. (Debian uses /usr/local/lib/python3/dist-packages vs. /usr/lib/python3/dist-packages as an additional layer of separation from a locally-compiled Python interpreter: if you build and install upstream CPython in /usr/local/bin, it will look at /usr/local/lib/python3/site-packages, and Debian wishes to make sure that packages installed via the locally-built interpreter don't show up on sys.path for the distro interpreter.) Note that the /usr/local vs. /usr split is analogous to how the PATH environment variable typically includes /usr/local/bin:/usr/bin and non-distro software installs to /usr/local by default. This split is recommended by the Filesystem Hierarchy Standard. There are two ways you could arrange for your distro's own package builds to install into the distro directory. One is, if you are building and packaging Python libraries directly (e.g., your packaging helpers unpack a PEP 517-built wheel or call setup.py install), arrange for those tools to use a directory that is not in a sysconfig scheme but is still on sys.path. The other is to arrange for the default sysconfig scheme to change when running inside a package build versus when running on an installed system. The sysconfig customization hooks from bpo-43976 should make this easy (once accepted and implemented): make your packaging tool set an environment variable or some other detectable configuration, and define a get_preferred_schemes function to return a different scheme when called from inside a package build. Then you can use pip install as part of your distro packaging. We propose adding a --scheme=... option to instruct pip to run against a specific scheme. (See Implementation Notes below for how pip currently determines schemes.)
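As a rough sketch of the hook described above (everything here is hypothetical: the hook would be supplied by the distro when the interpreter is built, and the scheme names and the RPM_BUILD_ROOT environment variable are merely illustrative ways a distro might detect a package build):

    import os

    def get_preferred_schemes():
        # Inside a distro package build, return the distro's own scheme so
        # that an unadorned "pip install" lands in the distro directory;
        # otherwise, return the scheme for locally-installed packages.
        if os.environ.get("RPM_BUILD_ROOT"):
            prefix_scheme = "posix_distro"
        else:
            prefix_scheme = "posix_local"
        return {
            "prefix": prefix_scheme,
            "home": "posix_home",
            "user": "posix_user",
        }

The proposed pip --scheme=... option would complement such a hook by allowing the target scheme to be selected explicitly.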
Once that's available, for local testing and possibly for actual packaging, you would be able to run something like pip install --scheme=posix_distro to explicitly install a package into your distro's location (bypassing get_preferred_schemes). One could also, if absolutely needed, use pip uninstall --scheme=posix_distro to use pip to remove packages from the system-managed directory, which addresses the (hopefully theoretical) regression in use case 5 in Rationale. To install packages with pip, you would also need to either suppress the EXTERNALLY-MANAGED marker file to allow pip to run or override it on the command line. You may want to use the same means for suppressing the marker file in build chroots as you do in container images. The advantage of setting these up to be automatic (suppressing the marker file in your build environment and having get_preferred_schemes automatically return your distro's scheme) is that an unadorned pip install will work inside a package build, which generally means that an unmodified upstream build script that happens to internally call pip install will do the right thing. You can, of course, just ensure that your packaging process always calls pip install --scheme=posix_distro --break-system-packages, which would work too. The best approach here depends a lot on your distro's conventions and mechanisms for packaging. Similarly, the sysconfig paths that are not for importable Python code - that is, include, platinclude, scripts, and data - should also have two variants, one for use by distro-packaged software and one for use by locally-installed software, and the distro should be set up such that both are usable. For instance, a typical FHS-compliant distro will use /usr/local/include for the default scheme's include and /usr/include for distro-packaged headers and place both on the compiler's search path, and it will use /usr/local/bin for the default scheme's scripts and /usr/bin for distro-packaged entry points and place both on $PATH. Backwards Compatibility All of these mechanisms are proposed for new distro releases and new versions of tools like pip only. In particular, we strongly recommend that distros with a concept of major versions only add the marker file or change sysconfig schemes in a new major version; otherwise there is a risk that, on an existing system, software installed via a Python-specific package manager now becomes unmanageable (without an override option). For a rolling-release distro, if possible, only add the marker file or change sysconfig schemes in a new Python minor version. One particular backwards-compatibility difficulty for package installation tools is likely to be managing environments created by old versions of virtualenv which have the latest version of the tool installed. A "virtual environment" now has a fairly precise definition: it uses the pyvenv.cfg mechanism, which causes sys.base_prefix != sys.prefix. It is possible, however, that a user may have an old virtual environment created by an older version of virtualenv; as of this writing, pip supports Python 3.6 onwards, which is in turn supported by virtualenv 15.1.0 onwards, so this scenario is possible. In older versions of virtualenv, the mechanism is instead to set a new attribute, sys.real_prefix, and it does not use the standard library support for virtual environments, so sys.base_prefix is the same as sys.prefix.
So the logic for robustly detecting a virtual environment is something like:

    import sys

    def is_virtual_environment():
        return sys.base_prefix != sys.prefix or hasattr(sys, "real_prefix")

Security Implications The purpose of this feature is not to implement a security boundary; it is to discourage well-intended changes from unexpectedly breaking a user's environment. That is to say, the reason this PEP restricts pip install outside a virtual environment is not that it's a security risk to be able to do so; it's that "There should be one-- and preferably only one --obvious way to do it," and that way should be using a virtual environment. pip install outside a virtual environment is rather too obvious for what is almost always the wrong way to do it. If there is a case where a user should not be able to sudo pip install or pip install --user and add files to sys.path for security reasons, that needs to be implemented either via access control rules on what files the user can write to or an explicitly secured sys.path for the program in question. Neither of the mechanisms in this PEP should be interpreted as a way to address such a scenario. For those reasons, an attempted install with a marker file present is not a security incident, and there is no need to raise an auditing event for it. If the calling user legitimately has access to sudo pip install or pip install --user, they can accomplish the same installation entirely outside of Python; if they do not legitimately have such access, that's a problem outside the scope of this PEP. The marker file itself is located in the standard library directory, which is a trusted location (i.e., anyone who can write to the marker file used by a particular installer could, presumably, run arbitrary code inside the installer). Therefore, there is generally no need to filter out terminal escape sequences or other potentially-malicious content in the error message. Alternatives There are a number of similar proposals we considered that this PEP rejects or defers, largely to preserve the behavior in the case-by-case analysis in Rationale. Marker file Should the marker file be in sys.path, marking a particular directory as not to be written to by a Python-specific package manager? This would help with the second problem addressed by this PEP (not overwriting or deleting distro-owned files) but not the first (incompatible installs). A directory-specific marker in /usr/lib/python3.x/site-packages would not discourage installations into either /usr/local/lib/python3.x/site-packages or ~/.local/lib/python3.x/site-packages, both of which are on sys.path for /usr/bin/python3. In other words, the marker file should not be interpreted as marking a single directory as externally managed (even though it happens to be in a directory on sys.path); it marks the entire Python installation as externally managed. Another variant of the above: should the marker file be in sys.path, where if it can be found in any directory in sys.path, it marks the installation as externally managed? An apparent advantage of this approach is that it automatically disables itself in virtual environments. Unfortunately, this has the wrong behavior with a --system-site-packages virtual environment, where the system-wide sys.path is visible but package installations are allowed. (It could work if the rule of exempting virtual environments is preserved, but that seems to have no advantage over the current scheme.) Should the marker just be a new attribute of a sysconfig scheme?
There is some conceptual cleanliness to this, except that it's hard to override. We want to make it easy for container images, package build environments, etc. to suppress the marker file. A file that you can remove is easy; code in sysconfig is much harder to modify. Should the file be in /etc? No, because again, it refers to a specific Python installation. A user who installs their own Python may well want to install packages within the global context of that interpreter. Should the configuration setting be in pip.conf or distutils.cfg? Apart from the above objections about marking an installation, this mechanism isn't specific to either of those tools. (It seems reasonable for pip to also implement a configuration flag for users to prevent themselves from performing accidental non-virtual-environment installs in any Python installation, but that is outside the scope of this PEP.) Should the file be TOML? TOML is gaining popularity for packaging (see e.g. PEP 517) but does not yet have an implementation in the standard library. Strictly speaking, this isn't a blocker - distros need only write the file, not read it, so they don't need a TOML library (the file will probably be written by hand, regardless of format), and packaging tools likely have a TOML reader already. However, the INI format is currently used for various other forms of packaging metadata (e.g., pydistutils.cfg and setup.cfg), meets our needs, and is parsable by the standard library; moreover, the pip maintainers expressed a preference to avoid using TOML for this yet. Should the file be email.message-style? While this format is also used for packaging metadata (e.g. sdist and wheel metadata) and is also parsable by the standard library, it doesn't handle multi-line entries quite as clearly, and that is our primary use case. Should the marker file be executable Python code that evaluates whether installation should be allowed or not? Apart from the concerns above about having the file in sys.path, we have a concern that making it executable is committing to too powerful an API and risks making behavior harder to understand. (Note that the get_default_scheme hook of bpo-43976 is in fact executable, but that code needs to be supplied when the interpreter is built; it isn't intended to be supplied post-build.) When overriding the marker, should a Python-specific package manager be disallowed from shadowing a package installed by the external package manager (i.e., installing modules of the same name)? This would minimize the risk of breaking system software, but it's not clear it's worth the additional user experience complexity. There are legitimate use cases for shadowing system packages, and an additional command-line option to permit it would be more confusing. Meanwhile, not passing that option wouldn't eliminate the risk of breaking system software, which may be relying on a try: import xyz failing, finding a limited set of entry points, etc. Communicating this distinction seems difficult. We think it's a good idea for Python-specific package managers to print a warning if they shadow a package, but we think it's not worth disabling it by default. Why not use the INSTALLER file from PEP 376 to determine who installed a package and whether it can be removed? First, it's specific to a particular package (it's in the package's dist-info directory), so like some of the alternatives above, it doesn't provide information on an entire environment and whether package installations are permissible.
PEP 627 also updates PEP 376 to prevent programmatic use of INSTALLER, specifying that the file is "to be used for informational purposes only. [...] Our goal is supporting interoperating tools, and basing any action on which tool happened to install a package runs counter to that goal." Finally, as PEP 627 envisions, there are legitimate use cases for one tool knowing how to handle packages installed by another tool; for instance, conda can safely remove a package installed by pip into a Conda environment. Why does the specification give no means for disabling package installations inside a virtual environment? We can't see a particularly strong use case for it (at least not one related to the purposes of this PEP). If you need it, it's simple enough to pip uninstall pip inside that environment, which should discourage at least unintentional changes to the environment (and this specification makes no provision to disable intentional changes, since after all the marker file can be easily removed). System Python Shouldn't distro software just run with the distro site-packages directory alone on sys.path and ignore the local system administrator's site-packages as well as the user-specific one? This is a worthwhile idea, and various versions of it have been circulating for a while under the name of "system Python" or "platform Python" (with a separate "user Python" for end users writing Python or installing Python software separate from the system). However, it's a much more involved change. First, it would be a backwards-incompatible change. As mentioned in the Motivation section, there are valid use cases for running distro-installed Python applications like Sphinx or Ansible with locally-installed Python libraries available on their sys.path. A wholesale switch to ignoring local packages would break these use cases, and a distro would have to make a case-by-case analysis of whether an application ought to see locally-installed libraries or not. Furthermore, Fedora attempted this change and reverted it, finding, ironically, that their implementation of the change broke their package manager. Given that experience, there are clearly details to be worked out before distros can reliably implement that approach, and a PEP recommending it would be premature. This PEP is intended to be a complete and self-contained change that is independent of a distributor's decision for or against "system Python" or similar proposals. It is not incompatible with a distro implementing "system Python" in the future, and even though both proposals address the same class of problems, there are still arguments in favor of implementing something like "system Python" even after implementing this PEP. At the same time, though, this PEP specifically tries to make a more targeted and minimal change, such that it can be implemented by distributors who don't expect to adopt "system Python" (or don't expect to implement it immediately). The changes in this PEP stand on their own merits and are not an intermediate step for some future proposal. This PEP reduces (but does not eliminate) the risk of breaking system software while minimizing (but not completely avoiding) breaking changes, which should therefore be much easier to implement than the full "system Python" idea, which comes with the downsides mentioned above.
We expect that the guidance in this PEP - that users should use virtual environments whenever possible and that distros should have separate sys.path directories for distro-managed and locally-managed modules - should make further experiments easier in the future. These may include distributing wholly separate "system" and "user" Python interpreters, running system software out of a distro-owned virtual environment or PYTHONHOME (but shipping a single interpreter), or modifying the entry points for certain software (such as the distro's package manager) to use a sys.path that only sees distro-managed directories. Those ideas themselves, however, remain outside the scope of this PEP. Implementation Notes This section is non-normative and contains notes relevant to both the specification and potential implementations. Currently, pip does not directly expose a way to choose a target sysconfig scheme, but it has three ways of looking up schemes when installing:

    pip install
        Calls sysconfig.get_default_scheme(), which is usually (in upstream
        CPython and most current distros) the same as
        get_preferred_scheme('prefix').

    pip install --prefix=/some/path
        Calls sysconfig.get_preferred_scheme('prefix').

    pip install --user
        Calls sysconfig.get_preferred_scheme('user').

Finally, pip install --target=/some/path writes directly to /some/path without looking up any schemes. Debian currently carries a patch to change the default install location inside a virtual environment, using a few heuristics (including checking for the VIRTUAL_ENV environment variable), largely so that the directory used in a virtual environment remains site-packages and not dist-packages. This does not particularly affect this proposal, because the implementation of that patch does not actually change the default sysconfig scheme, and notably does not change the result of sysconfig.get_path("stdlib"). Fedora currently carries a patch to change the default install location when not running inside rpmbuild, which they use to implement the two-system-wide-directories approach. This is conceptually the sort of hook envisioned by bpo-43976, except implemented as a code patch to distutils instead of as a changed sysconfig scheme. The implementation of is_virtual_environment above, as well as the logic to load the EXTERNALLY-MANAGED file and find the error message from it, may as well get added to the standard library (sys and sysconfig, respectively), to centralize their implementations, but they don't need to be added yet. References For additional background on these problems and previous attempts to solve them, see Debian bug 771794 "pip silently removes/updates system provided python packages" from 2014, Fedora's 2018 article Making sudo pip safe about pointing sudo pip at /usr/local (which acknowledges that the changes still do not make sudo pip completely safe), pip issues 5605 ("Disable upgrades to existing python modules which were not installed via pip") and 5722 ("pip should respect /usr/local") from 2018, and the post-PyCon US 2019 discussion thread Playing nice with external package managers. Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

[1] https://pip.pypa.io/en/stable/
[2] https://setuptools.readthedocs.io/en/latest/deprecated/easy_install.html (Note that the easy_install command was removed in setuptools version 52, released 23 January 2021.)
PEP: 394 Title: The "python" Command on Unix-Like Systems Author: Kerrick Staley <[email protected]>, Alyssa Coghlan <[email protected]>, Barry Warsaw <[email protected]>, Petr Viktorin <[email protected]>, Miro Hrončok <[email protected]>, Carol Willing <[email protected]>, Status: Active Type: Informational Created: 02-Mar-2011 Post-History: 04-Mar-2011, 20-Jul-2011, 16-Feb-2012, 30-Sep-2014, 28-Apr-2018, 26-Jun-2019 Resolution: https://mail.python.org/pipermail/python-dev/2012-February/116594.html Abstract This PEP outlines the behavior of Python scripts when the python command is invoked. Depending on a distribution or system configuration, python may or may not be installed. If python is installed, its target interpreter may refer to python2 or python3. End users may be unaware of this inconsistency across Unix-like systems. This PEP's goal is to reduce user confusion about what python references and what the script's behavior will be. The recommendations in the next section of this PEP will outline the behavior when: - using virtual environments - writing cross-platform scripts with shebangs for either python2 or python3 The PEP's goal is to clarify the behavior for script end users, distribution providers, and script maintainers / authors. Recommendation Our recommendations are detailed below. We call out any expectations that these recommendations are based upon. For Python runtime distributors - We expect Unix-like software distributions (including systems like macOS and Cygwin) to install the python2 command into the default path whenever a version of the Python 2 interpreter is installed, and the same for python3 and the Python 3 interpreter. - When invoked, python2 should run some version of the Python 2 interpreter, and python3 should run some version of the Python 3 interpreter. - If the python command is installed, it is expected to invoke either the same version of Python as the python3 command or as the python2 command. - Distributors may choose to set the behavior of the python command as follows: - python2, - python3, - not provide the python command, allowing python to be configured by an end user or a system administrator. - The Python 3.x idle, pydoc, and python-config commands should likewise be available as idle3, pydoc3, and python3-config; Python 2.x versions as idle2, pydoc2, and python2-config. The commands with no version number should either invoke the same version of Python as the python command, or not be available at all. - When packaging third party Python scripts, distributors are encouraged to change less specific shebangs to more specific ones. This ensures software is used with the latest version of Python available, and it can remove a dependency on Python 2. The details on what specifics to set are left to the distributors, though example specifics could include: - Changing python shebangs to python3 when Python 3.x is supported. - Changing python shebangs to python2 when Python 3.x is not yet supported. - Changing python3 shebangs to python3.8 if the software is built with Python 3.8. - When a virtual environment (created by the PEP 405 venv package or a similar tool such as virtualenv or conda) is active, the python command should refer to the virtual environment's interpreter and should always be available. The python3 or python2 command (according to the environment's interpreter version) should also be available.
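A small sketch illustrating the last point (POSIX paths assumed, and "demo-env" is an arbitrary name): a virtual environment always provides an unversioned python command that refers to the environment's own interpreter.

    import subprocess
    import venv

    # Create a minimal environment; with_pip=False keeps it fast for the demo.
    venv.create("demo-env", with_pip=False)

    # The environment's unversioned `python` reports the environment's prefix,
    # not the base interpreter's prefix.
    result = subprocess.run(
        ["demo-env/bin/python", "-c", "import sys; print(sys.prefix)"],
        capture_output=True, text=True, check=True,
    )
    print(result.stdout.strip())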
For Python script publishers - When reinvoking the interpreter from a Python script, querying sys.executable to avoid hardcoded assumptions regarding the interpreter location remains the preferred approach. - Encourage your end users to use a virtual environment. This makes the user's environment more predictable (possibly resulting in fewer issues), and helps avoid disrupting their system. - For scripts that are only expected to be run in an activated virtual environment, shebang lines can be written as #!/usr/bin/env python, as this instructs the script to respect the active virtual environment. - In cases where the script is expected to be executed outside virtual environments, developers will need to be aware of the following discrepancies across platforms and installation methods: - Older Linux distributions will provide a python command that refers to Python 2, and will likely not provide a python2 command. - Some newer Linux distributions will provide a python command that refers to Python 3. - Some Linux distributions will not provide a python command at all by default, but will provide a python3 command by default. - When potentially targeting these environments, developers may either use a Python package installation tool that rewrites shebang lines for the installed environment, provide instructions on updating shebang lines interactively, or else use more specific shebang lines that are tailored to the target environment. - Scripts targeting both “old systems” and systems without the default python command need to make a compromise and document this situation. Avoiding shebangs (via the console_scripts Entry Points ([1]) or similar means) is the recommended workaround for this problem. - Applications designed exclusively for a specific environment (such as a container or virtual environment) may continue to use the python command name. For end users of Python - While far from being universally available, python remains the preferred spelling for explicitly invoking Python, as this is the spelling that virtual environments make consistently available across different platforms and Python installations. - For software that is not distributed with (or developed for) your system, we recommend using a virtual environment, possibly with an environment manager like conda or pipenv, to help avoid disrupting your system Python installation. These recommendations are the outcome of the relevant python-dev discussions in March and July 2011 ([2],[3]), February 2012 ([4]), September 2014 ([5]), discussion on GitHub in April 2018 ([6]), on python-dev in February 2019 ([7]), and during the PEP update review in May/June 2019 ([8]). History of this PEP In 2011, the majority of distributions aliased the python command to Python 2, but some started switching it to Python 3 ([9]). As some of the former distributions did not provide a python2 command by default, there was previously no way for Python 2 code (or any code that invokes the Python 2 interpreter directly rather than via sys.executable) to reliably run on all Unix-like systems without modification, as the python command would invoke the wrong interpreter version on some systems, and the python2 command would fail completely on others. This PEP originally provided a very simple mechanism to restore cross-platform support, with minimal additional work required on the part of distribution maintainers. Simplified, the recommendation was: 1. 
The python command was preferred for code compatible with both Python 2 and 3 (since it was available on all systems, even those that already aliased it to Python 3). 2. The python command should always invoke Python 2 (to prevent hard-to-diagnose errors when Python 2 code is run on Python 3). 3. The python2 and python3 commands should be available to specify the version explicitly. However, these recommendations implicitly assumed that Python 2 would always be available. As Python 2 is nearing its end of life in 2020 (PEP 373, PEP 404), distributions are making Python 2 optional or removing it entirely. This means either removing the python command or switching it to invoke Python 3. Some distributors also decided that their users were better served by ignoring the PEP's original recommendations, and provided system administrators with the freedom to configure their systems based on the needs of their particular environment. Current Rationale As of 2019, activating a Python virtual environment (or its functional equivalent) prior to script execution is one way to obtain a consistent cross-platform and cross-distribution experience. Accordingly, publishers can expect users of the software to provide a suitable execution environment. Future Changes to this Recommendation This recommendation will be periodically reviewed over the next few years, and updated when the core development team judges it appropriate. As a point of reference, regular maintenance releases for the Python 2.7 series will continue until January 2020. Migration Notes This section does not contain any official recommendations from the core CPython developers. It's merely a collection of notes regarding various aspects of migrating to Python 3 as the default version of Python for a system. They will hopefully be helpful to any distributions considering making such a change. - The main barrier to a distribution switching the python command from python2 to python3 isn't breakage within the distribution, but instead breakage of private third party scripts developed by sysadmins and other users. Updating the python command to invoke python3 by default indicates that a distribution is willing to break such scripts with errors that are potentially quite confusing for users that aren't familiar with the backwards incompatible changes in Python 3. For example, while the change of print from a statement to a builtin function is relatively simple for automated converters to handle, the SyntaxError from attempting to use the Python 2 notation in Python 3 may be confusing for users that are not aware of the change:

    $ python3 -c 'print "Hello, world!"'
      File "<string>", line 1
        print "Hello, world!"
              ^
    SyntaxError: Missing parentheses in call to 'print'. Did you mean print("Hello, world!")?

While this might be obvious for experienced Pythonistas, such scripts might even be run by people who are not familiar with Python at all. Avoiding breakage of such third party scripts was the key reason this PEP used to recommend that python continue to refer to python2. - The error message python: command not found tends to be surprisingly actionable, even for people unfamiliar with Python. - The pythonX.X (e.g. python3.6) commands exist on modern systems, on which they invoke specific minor versions of the Python interpreter. It can be useful for distribution-specific packages to take advantage of these utilities if they exist, since it will prevent code breakage if the default minor version of a given major version is changed.
However, scripts intending to be cross-platform should not rely on the presence of these utilities, but rather should be tested on several recent minor versions of the target major version, compensating, if necessary, for the small differences that exist between minor versions. This prevents the need for sysadmins to install many very similar versions of the interpreter. - When the pythonX.X binaries are provided by a distribution, the python2 and python3 commands should refer to one of those files rather than being provided as a separate binary file. - It is strongly encouraged that distribution-specific packages use python3 (or python2) rather than python, even in code that is not intended to operate on other distributions. This will reduce problems if the distribution later decides to change the version of the Python interpreter that the python command invokes, or if a sysadmin installs a custom python command with a different major version than the distribution default. - If the above point is adhered to and sysadmins are permitted to change the python command, then the python command should always be implemented as a link to the interpreter binary (or a link to a link) and not vice versa. That way, if a sysadmin does decide to replace the installed python file, they can do so without inadvertently deleting the previously installed binary. - Even as the Python 2 interpreter becomes less common, it remains reasonable for scripts to continue to use the python3 convention, rather than just python. - If these conventions are adhered to, it will become the case that the python command is only executed in an interactive manner as a user convenience, or else when using a virtual environment or similar mechanism. Backwards Compatibility A potential problem can arise if a script adhering to the python2/python3 convention is executed on a system not supporting these commands. This is mostly a non-issue, since the sysadmin can simply create these symbolic links and avoid further problems. It is a significantly more obvious breakage than the sometimes cryptic errors that can arise when attempting to execute a script containing Python 2 specific syntax with a Python 3 interpreter or vice versa. Application to the CPython Reference Interpreter While technically a new feature, the make install and make bininstall commands in the 2.7 version of CPython were adjusted to create the following chains of symbolic links in the relevant bin directory (the final item listed in the chain is the actual installed binary; preceding items are relative symbolic links):

    python -> python2 -> python2.7
    python-config -> python2-config -> python2.7-config

Similar adjustments were made to the macOS binary installer. This feature first appeared in the default installation process in CPython 2.7.3. The installation commands in the CPython 3.x series already create the appropriate symlinks. For example, CPython 3.2 creates:

    python3 -> python3.2
    idle3 -> idle3.2
    pydoc3 -> pydoc3.2
    python3-config -> python3.2-config

And CPython 3.3 creates:

    python3 -> python3.3
    idle3 -> idle3.3
    pydoc3 -> pydoc3.3
    python3-config -> python3.3-config
    pysetup3 -> pysetup3.3

The implementation progress of these features in the default installers was managed on the tracker as issue #12627 ([10]). Impact on PYTHON* Environment Variables The choice of target for the python command implicitly affects a distribution's expected interpretation of the various Python related environment variables.
The use of *.pth files in the relevant site-packages folder, the "per-user site packages" feature (see python -m site) or more flexible tools such as virtualenv are all more tolerant of the presence of multiple versions of Python on a system than the direct use of PYTHONPATH. Exclusion of MS Windows This PEP deliberately excludes any proposals relating to Microsoft Windows, as devising an equivalent solution for Windows was deemed too complex to handle here. PEP 397 and the related discussion on the python-dev mailing list address this issue. References Copyright This document has been placed in the public domain.

[1] The console_scripts Entry Point (https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point)
[2] Support the /usr/bin/python2 symlink upstream (with bonus grammar class!) (https://mail.python.org/pipermail/python-dev/2011-March/108491.html)
[3] Rebooting PEP 394 (aka Support the /usr/bin/python2 symlink upstream) (https://mail.python.org/pipermail/python-dev/2011-July/112322.html)
[4] PEP 394 request for pronouncement (python2 symlink in *nix systems) (https://mail.python.org/pipermail/python-dev/2012-February/116435.html)
[5] PEP 394 - Clarification of what "python" command should invoke (https://mail.python.org/pipermail/python-dev/2014-September/136374.html)
[6] PEP 394: Allow the python command to not be installed, and other minor edits (https://github.com/python/peps/pull/630)
[7] Another update for PEP 394 -- The "python" Command on Unix-Like Systems (https://mail.python.org/pipermail/python-dev/2019-February/156272.html)
[8] May 2019 PEP update review (https://github.com/python/peps/pull/989)
[9] Arch Linux announcement that their "python" link now refers to Python 3 (https://www.archlinux.org/news/python-is-now-python-3/)
[10] Implement PEP 394 in the CPython Makefile (https://github.com/python/cpython/issues/56836)
PEP: 611 Title: The one million limit Author: Mark Shannon <[email protected]> Status: Withdrawn Type: Standards Track Content-Type: text/x-rst Created: 05-Dec-2019 Post-History: Abstract This PEP proposes a soft limit of one million (1 000 000), and a larger hard limit for various aspects of Python code and its implementation. The Python language does not specify limits for many of its features. Not having any limit to these values seems to enhance programmer freedom, at least superficially, but in practice the CPython VM and other Python virtual machines have implicit limits or are forced to assume that the limits are astronomical, which is expensive. This PEP lists a number of features which are to have a limit of one million. For CPython the hard limit will be eight million (8 000 000). Motivation There are many values that need to be represented in a virtual machine. If no limit is specified for these values, then the representation must either be inefficient or vulnerable to overflow. The CPython virtual machine represents values like line numbers, stack offsets and instruction offsets by 32 bit values. This is inefficient, and potentially unsafe. It is inefficient as actual values rarely need more than a dozen or so bits to represent them. It is unsafe as malicious or poorly generated code could cause values to exceed 2³². For example, line numbers are represented by 32 bit values internally. This is inefficient, given that modules almost never exceed a few thousand lines. Despite being inefficient, it is still vulnerable to overflow as it is easy for an attacker to create a module with billions of newline characters. Memory access is usually a limiting factor in the performance of modern CPUs. Better packing of data structures enhances locality and reduces memory bandwidth, at a modest increase in ALU usage (for shifting and masking). Being able to safely store important values in 20 bits would allow memory savings in several data structures including, but not limited to: - Frame objects - Object headers - Code objects There is also the potential for a more efficient instruction format, speeding up interpreter dispatch. Is this a worthwhile trade off? The downside of any form of limit is that it might potentially make someone's job harder, for example, it may be harder to write a code generator that keeps the size of modules to one million lines. However, it is the author's opinion, having written many code generators, that such a limit is extremely unlikely to be a problem in practice. The upside of these limits is the freedom it grants implementers of runtimes, whether CPython, PyPy, or any other implementation, to improve performance. It is the author's belief that the potential value of even a 0.1% reduction in the cost of running Python programs globally will hugely exceed the cost of modifying a handful of code generators. Rationale Imposing a limit on values such as lines of code in a module, and the number of local variables, has significant advantages for ease of implementation and efficiency of virtual machines. If the limit is sufficiently large, there is no adverse effect on users of the language. By selecting a fixed but large limit for these values, it is possible to have both safety and efficiency whilst causing no inconvenience to human programmers and only very rare problems for code generators. One million The value "one million" is very easy to remember. The one million limit is mostly a limit on human generated code, not runtime sizes.
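As a quick arithmetic check of the bit-width claims here (the Motivation above notes that important values could be stored in 20 bits, and the hard limit discussed below is chosen to fit in 23 bits), a couple of lines of Python confirm the figures:

    # bit_length() gives the number of bits needed to represent an integer.
    print((1_000_000).bit_length())  # 20
    print((8_000_000).bit_length())  # 23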
One million lines in a single module is a ridiculous concentration of code; the entire Python standard library is about 2/3rd of a million lines, spread over 1600 files. The Java Virtual Machine (JVM)[1] specifies a limit of 2¹⁶-1 (65535) for many program elements similar to those covered here. This limit enables limited values to fit in 16 bits, which is a very efficient machine representation. However, this limit is quite easily exceeded in practice by code generators and the author is aware of existing Python code that already exceeds 2¹⁶ lines of code. The hard limit of eight million fits into 23 bits which, although not as convenient for machine representation, is still reasonably compact. A limit of eight million is small enough for efficiency advantages (only 23 bits), but large enough not to impact users (no one has ever written a module that large). While it is possible that generated code could exceed the limit, it is easy for a code generator to modify its output to conform. The author has hit the 64K limit in the JVM on at least two occasions when generating Java code. The workarounds were relatively straightforward and wouldn't have been necessary with a limit of one million bytecodes or lines of code. Where necessary, the soft limit can be increased for those programs that exceed the one million limit. Having a soft limit of one million provides a warning of problematic code, without causing an error and forcing an immediate fix. It also allows dynamic optimizers to use more compact formats without inline checks. Specification This PEP proposes that the following language features and runtime values have a soft limit of one million. - The number of source code lines in a module. - The number of bytecode instructions in a code object. - The sum of local variables and stack usage for a code object. - The number of classes in a running interpreter. - The recursion depth of Python code. It is likely that memory constraints would be a limiting factor before the number of classes reaches one million. Recursion depth The recursion depth limit only applies to pure Python code. Code written in a foreign language, such as C, may consume hardware stack and thus be limited to a recursion depth of a few thousand. It is expected that implementations will raise an exception should the hardware stack get close to its limit. For code that mixes Python and C calls, it is most likely that the hardware limit will apply first. The size of the hardware stack may vary at runtime and will not be visible. Soft and hard limits Implementations should emit a warning whenever a soft limit is exceeded, unless the hard limit has the same value as the soft limit. When a hard limit is exceeded, then an exception should be raised. Depending on the implementation, different hard limits might apply. In some cases the hard limit might be below the soft limit. For example, many micropython ports are unlikely to be able to support such large limits. Introspecting and modifying the limits One or more functions will be provided in the sys module to introspect or modify the soft limits at runtime, but the limits may not be raised above the hard limit. Inferred limits These limits are not part of the specification, but a limit of less than one million can be inferred from the limit on the number of bytecode instructions in a code object, because there would be insufficient instructions to load more than one million constants or use more than one million names. - The number of distinct names in a code object.
- The number of constants in a code object. The advantages for CPython of imposing these limits: Lines of code in a module and code object restrictions. When compiling source code to bytecode or modifying bytecode for profiling or debugging, an intermediate form is required. By limiting operands to 23 bits, instructions can be represented in a compact 64 bit form allowing very fast passes over the instruction sequence. Having 23 bit operands (24 bits for relative branches) allows instructions to fit into 32 bits without needing additional EXTENDED_ARG instructions. This improves dispatch, as the operand is strictly local to the instruction. It is unclear whether this would help performance; it is merely an example of what is possible. The benefit of restricting the number of lines in a module is primarily the implied limit on bytecodes. It is more important for implementations that it is instructions per code object, not lines per module, that is limited to one million, but it is much easier to explain a one million line limit. Having a consistent limit of one million is just easier to remember. It is most likely, although not guaranteed, that the line limit will be hit first and thus provide a simpler-to-understand error message to the developer. Total number of classes in a running interpreter This limit has the potential to reduce the size of object headers considerably. Currently objects have a two word header, for objects without references (int, float, str, etc.) or a four word header for objects with references. By reducing the maximum number of classes, the space for the class reference can be reduced from 64 bits to fewer than 32 bits allowing a much more compact header. For example, a super-compact header format might look like this:

    struct header {
        uint32_t gc_flags:6;   /* Needs finalisation, might be part of a cycle, etc. */
        uint32_t class_id:26;  /* Can be efficiently mapped to address by ensuring
                                  suitable alignment of classes */
        uint32_t refcount;     /* Limited memory or saturating */
    }

This format would reduce the size of a Python object without slots, on a 64 bit machine, from 40 to 16 bytes. Note that there are two ways to use a 32 bit refcount on a 64 bit machine. One is to limit each sub-interpreter to 32Gb of memory. The other is to use a saturating reference count, which would be a little bit slower, but allow unlimited memory allocation. Enforcement Python implementations are not obliged to enforce the limits. However, if a limit can be enforced without hurting performance, then it should be. It is anticipated that CPython will enforce the limits as follows: - The number of source code lines in a module: version 3.9 onward. - The number of bytecode instructions in a code object: 3.9 onward. - The sum of local variables and stack usage for a code object: 3.9 onward. - The number of classes in a running interpreter: probably 3.10 onward, maybe warning in 3.9. Hard limits in CPython CPython will enforce a hard limit on all the above values. The value of the hard limit will be 8 million. It is hypothetically possible that some machine generated code exceeds one or more of the above limits. The author believes that to be incredibly unlikely and easily fixed by modifying the output stage of the code generator. We would like to gain the benefit from the above limits for performance as soon as possible. To that end, CPython will start applying limits from version 3.9 onward.
To ease the transition and minimize breakage, the initial limits will be 16 million, reducing to 8 million in a later version. Backwards Compatibility The actual hard limits enforced by CPython will be:

    +---------------+--------------+
    | Version       | Hard limit   |
    +===============+==============+
    | 3.9           | 16 million   |
    +---------------+--------------+
    | 3.10 onward   | 8 million    |
    +---------------+--------------+

Given the rarity of code generators that would exceed the one million limits, and the environments in which they are typically used, it seems reasonable to start issuing warnings in 3.9 if any limited quantity exceeds one million. Historically the recursion limit has been set at 1000. To avoid breaking code that implicitly relies on the value being small, the soft recursion limit will be increased gradually, as follows:

    +---------+------------+
    | Version | Soft limit |
    +=========+============+
    | 3.9     | 4 000      |
    +---------+------------+
    | 3.10    | 16 000     |
    +---------+------------+
    | 3.11    | 64 000     |
    +---------+------------+
    | 3.12    | 125 000    |
    +---------+------------+
    | 3.13    | 1 million  |
    +---------+------------+

The hard limit will be set to 8 million immediately. Other implementations Implementations of Python other than CPython have different purposes, so different limits might be appropriate. This is acceptable, provided the limits are clearly documented. General purpose implementations General purpose implementations, such as PyPy, should use the one million limit. If maximum compatibility is a goal, then they should also follow CPython's behaviour for 3.9 to 3.11. Special purpose implementations Special purpose implementations may use lower limits, as long as they are clearly documented. An implementation designed for embedded systems, for example MicroPython, might impose limits as low as a few thousand. Security Implications Minimal. This reduces the attack surface of any Python virtual machine by a small amount. Reference Implementation None, as yet. This will be implemented in CPython, once the PEP has been accepted. Rejected Ideas Being able to modify the hard limits upwards at compile time was suggested by Tal Einat. This is rejected as the current limits of 2³² have not been an issue, and the practical advantages of allowing limits between 2²⁰ and 2³² seem slight compared to the additional code complexity of supporting such a feature. Open Issues None, as yet. References https://docs.oracle.com/javase/specs/jvms/se8/jvms8.pdf Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive. [1] The Java Virtual Machine specification
2019-12-05T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0611/", "authors": [ "Mark Shannon" ], "pep_number": "0611", "pandoc_version": "3.5" }
0632
PEP: 632 Title: Deprecate distutils module Author: Steve Dower <[email protected]> Discussions-To: https://discuss.python.org/t/pep-632-deprecate-distutils-module/5134 Status: Final Type: Standards Track Content-Type: text/x-rst Created: 03-Sep-2020 Python-Version: 3.10 Post-History: 03-Sep-2020, 22-Jan-2021 Resolution: https://mail.python.org/archives/list/[email protected]/thread/TXU6TVOMBLQU3SV57DMMOA5Y2E67AW7P/

Abstract

The distutils module[1] has for a long time recommended using the setuptools package[2] instead. Setuptools has recently integrated a complete copy of distutils and is no longer dependent on the standard library[3]. Pip has been silently replacing distutils with setuptools when installing packages for a long time already, and the distutils documentation has stated that it is being phased out since 2014 (or earlier). It is time to remove it from the standard library.

Motivation

distutils[4] is a largely undocumented and unmaintained collection of utilities for packaging and distributing Python packages, including compilation of native extension modules. It defines a configuration format that describes a Python distribution and provides the tools to convert a directory of source code into a source distribution, and some forms of binary distribution. Because of its place in the standard library, many updates can only be released with a major release, and users cannot rely on particular fixes being available. setuptools[5] is a better documented and well maintained enhancement based on distutils. While it provides very similar functionality, it is much better able to support users on earlier Python releases, and can respond to bug reports more quickly. A number of platform-specific enhancements already exist in setuptools that have not been added to distutils, and there has been a long-standing recommendation in the distutils documentation to prefer setuptools. Historically, setuptools has extended distutils using subclassing and monkeypatching, but has now taken a copy of the underlying code.[6] As a result, the second-to-last major dependency on distutils is gone and there is no need to keep it in the standard library. The final dependency on distutils is CPython itself, which uses it to build native extension modules in the standard library (except on Windows). Because this is a CPython build-time dependency, it is possible to continue to use distutils for this specific case without it being part of the standard library. Deprecation and removal will make it obvious that issues should be fixed in the setuptools project, and will reduce a source of bug reports and unnecessary test maintenance. It will also help promote the development of alternative build backends, which can now be supported more easily thanks to PEP 517.

Specification

In Python 3.10 and 3.11, distutils will be formally marked as deprecated. All known issues will be closed at this time. import distutils will raise a deprecation warning. New issues that would be considered release blocking may still be fixed, but support for new tools or platforms will not be added. During Python 3.10 and 3.11, uses of distutils within the standard library may change to use alternative APIs. In Python 3.12, distutils will no longer be installed by make install or any of the first-party distributions. Third-party redistributors should no longer include distutils in their bundles or repositories. This PEP makes no specification on migrating the parts of the CPython build process that currently use distutils.
Depending on contributions, this migration may occur at any time. After Python 3.12 is started and when the CPython build process no longer depends on distutils being in the standard library, the entire Lib/distutils directory and Lib/test/test_distutils.py file will be removed from the repository. Other references to distutils will be cleaned up. As of Python 3.9's initial release, the following modules have references in code or comments:

- Lib/ctypes/util.py
- Lib/site.py
- Lib/sysconfig.py
- Lib/_aix_support.py
- Lib/_bootsubprocess.py
- Lib/_osx_support.py
- Modules/_decimal/tests/formathelper.py

The following Tools in CPython also refer to distutils. Note that none of these are installed with CPython:

- PC/layout (references will be removed)
- Tools/msi (references will be removed)
- Tools/peg_generator (will be adapted to a different build tool)
- Tools/test2to3 (example project will be removed)

As the distutils code is already included in setuptools, there is no need to republish it in any other form. Those who require access to the functionality should use setuptools or an alternative build backend.

Backwards Compatibility

Code that imports distutils will no longer work from Python 3.12. The suggested migration path is to use the equivalent (though not identical) imports from setuptools (see [7]), or to migrate to an alternative build backend (see PEP 517). Code already exists in setuptools to transparently switch setup.py files using distutils onto their equivalents, and so most working build scripts are already known to work with setuptools. Such scripts may need to update their import statements. Consult the setuptools documentation for specific migration advice.[8] Some projects use alternate sets of patches over distutils, notably numpy.distutils.[9] Projects that we know are doing this have been informed. Many build scripts use custom commands or narrowly scoped patches. As these packages are already subject to setuptools overriding distutils, we expect minimal disruption as a result of distutils being removed. Scripts may still need to be updated to avoid importing distutils.

Reference Implementation

setuptools version 48 includes the complete copy of distutils, and as such is no longer dependent on the standard library's copy. Most implementation issues they have faced are due to the continuing existence of distutils in the standard library, and so removal will improve the stability of their implementation. There is not yet a reference implementation for the removal of distutils from the standard library, nor is there an implementation for CPython's native module builds without relying on the standard library copy of distutils.

Migration Advice

Note: This section suggests some alternative replacements for popular functionality that is being formally deprecated with this PEP. It is current at time of writing, but is not kept up to date.
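As a concrete taste of the substitutions listed below, here is an illustrative before/after sketch for one of them, distutils.spawn.find_executable, whose standard-library replacement is shutil.which (the "gcc" lookup is an arbitrary example):

# Before (distutils is removed in Python 3.12):
#   from distutils.spawn import find_executable
#   compiler = find_executable("gcc")

# After (shutil.which has been in the standard library since Python 3.3):
import shutil

compiler = shutil.which("gcc")
print(compiler)  # e.g. "/usr/bin/gcc", or None when nothing is found on PATH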
For these modules or types, setuptools is the best substitute:

- distutils.ccompiler
- distutils.cmd.Command
- distutils.command
- distutils.config
- distutils.core.Distribution
- distutils.errors

For these modules or types, use the standards-defined Python Packaging Authority packages specified:

- distutils.version — use the packaging package

For these modules or functions, use the standard library module shown:

- distutils.fancy_getopt — use the argparse module
- distutils.spawn.find_executable — use the shutil.which function
- distutils.spawn.spawn — use the subprocess.run function
- distutils.sysconfig — use the sysconfig module
- distutils.util.get_platform — use the platform module

For these functions, and any others not mentioned here, you will need to reimplement the functionality yourself. The legacy documentation can be found at https://docs.python.org/3.9/distutils/apiref.html

- distutils.dir_util.create_tree
- distutils.util.change_root
- distutils.util.strtobool

Rejected Ideas

Deprecate but do not delete

The primary concern with this approach is that distutils most frequently breaks because of platform differences, which means that without maintenance, it will stop working out-of-sync with any Python release. This makes it impossible for libraries to reliably detect when they will stop working. In contrast, this PEP proposes a concrete date, known well in advance, when distutils will stop working, and commits to not breaking the API before that time. This gives maintainers a predictable schedule, ensures any breakage occurs at a point where users will already be expecting changed behavior, and provides a reliable detection mechanism (specifically, that import distutils raises). Finally, as long as distutils remains in the standard library in any form, it will interfere with third-party packages that provide shims or replacements, including setuptools. Completely removing the package at a known version makes it possible for third-parties to safely use a substitute.

Only deprecate the setuptools-like functionality

This suggestion assumes that there exists a volunteer to maintain whatever is left, which is not true. It also implies that anybody knows which functionality should remain, which as seen in the discussions is not at all clear. Most helper functions in distutils already have supported (and improved) alternatives, often in the standard library, and there is little that can be done to the legacy versions without breaking compatibility. (And any break requiring maintainers to update their code is essentially equivalent to requiring them to import a different function.) The last point from the previous section also applies here.

References

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
[1] distutils - Building and installing Python modules (https://docs.python.org/3.9/library/distutils.html)
[2] setuptools - PyPI (https://pypi.org/project/setuptools/)
[3] setuptools Issue #417 - Adopt distutils (https://github.com/pypa/setuptools/issues/417)
[4] distutils - Building and installing Python modules (https://docs.python.org/3.9/library/distutils.html)
[5] setuptools - PyPI (https://pypi.org/project/setuptools/)
[6] setuptools Issue #417 - Adopt distutils (https://github.com/pypa/setuptools/issues/417)
[7] Porting from Distutils (https://setuptools.readthedocs.io/en/latest/deprecated/distutils-legacy.html)
[8] Porting from Distutils (https://setuptools.readthedocs.io/en/latest/deprecated/distutils-legacy.html)
[9] Packaging (numpy.distutils) (https://numpy.org/doc/stable/reference/distutils.html)
2020-09-03T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0632/", "authors": [ "Steve Dower" ], "pep_number": "0632", "pandoc_version": "3.5" }
0612
PEP: 612 Title: Parameter Specification Variables Author: Mark Mendoza <[email protected]> Sponsor: Guido van Rossum <[email protected]> BDFL-Delegate: Guido van Rossum <[email protected]> Discussions-To: [email protected] Status: Final Type: Standards Track Topic: Typing Created: 18-Dec-2019 Python-Version: 3.10 Post-History: 18-Dec-2019, 13-Jul-2020 typing:paramspec and typing.ParamSpec

Abstract

There currently are two ways to specify the type of a callable, the Callable[[int, str], bool] syntax defined in PEP 484, and callback protocols from PEP 544 <544#callback-protocols>. Neither of these support forwarding the parameter types of one callable over to another callable, making it difficult to annotate function decorators. This PEP proposes typing.ParamSpec and typing.Concatenate to support expressing these kinds of relationships.

Motivation

The existing standards for annotating higher order functions don’t give us the tools to annotate the following common decorator pattern satisfactorily:

from typing import Awaitable, Callable, TypeVar

R = TypeVar("R")

def add_logging(f: Callable[..., R]) -> Callable[..., Awaitable[R]]:
    async def inner(*args: object, **kwargs: object) -> R:
        await log_to_database()
        return f(*args, **kwargs)
    return inner

@add_logging
def takes_int_str(x: int, y: str) -> int:
    return x + 7

await takes_int_str(1, "A")
await takes_int_str("B", 2) # fails at runtime

add_logging, a decorator which logs before each entry into the decorated function, is an instance of the Python idiom of one function passing all arguments given to it over to another function. This is done through the combination of the *args and **kwargs features in both parameters and in arguments. When one defines a function (like inner) that takes (*args, **kwargs) and goes on to call another function with (*args, **kwargs), the wrapping function can only be safely called in all of the ways that the wrapped function could be safely called. To type this decorator, we’d like to be able to place a dependency between the parameters of the callable f and the parameters of the returned function. PEP 484 supports dependencies between single types, as in def append(l: typing.List[T], e: T) -> typing.List[T]: ..., but there is no existing way to do so with a complicated entity like the parameters of a function. Due to the limitations of the status quo, the add_logging example will type check but will fail at runtime. inner will pass the string “B” into takes_int_str, which will try to add 7 to it, triggering a type error. This was not caught by the type checker because the decorated takes_int_str was given the type Callable[..., Awaitable[int]] (an ellipsis in place of parameter types is specified to mean that we do no validation on arguments). Without the ability to define dependencies between the parameters of different callable types, there is no way, at present, to make add_logging compatible with all functions, while still preserving the enforcement of the parameters of the decorated function. With the addition of the ParamSpec variables proposed by this PEP, we can rewrite the previous example in a way that keeps the flexibility of the decorator and the parameter enforcement of the decorated function.
from typing import Awaitable, Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

def add_logging(f: Callable[P, R]) -> Callable[P, Awaitable[R]]:
    async def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        await log_to_database()
        return f(*args, **kwargs)
    return inner

@add_logging
def takes_int_str(x: int, y: str) -> int:
    return x + 7

await takes_int_str(1, "A") # Accepted
await takes_int_str("B", 2) # Correctly rejected by the type checker

Another common decorator pattern that has previously been impossible to type is the practice of adding or removing arguments from the decorated function. For example:

class Request: ...

def with_request(f: Callable[..., R]) -> Callable[..., R]:
    def inner(*args: object, **kwargs: object) -> R:
        return f(Request(), *args, **kwargs)
    return inner

@with_request
def takes_int_str(request: Request, x: int, y: str) -> int:
    # use request
    return x + 7

takes_int_str(1, "A")
takes_int_str("B", 2) # fails at runtime

With the addition of the Concatenate operator from this PEP, we can even type this more complex decorator.

from typing import Concatenate

def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]:
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        return f(Request(), *args, **kwargs)
    return inner

@with_request
def takes_int_str(request: Request, x: int, y: str) -> int:
    # use request
    return x + 7

takes_int_str(1, "A") # Accepted
takes_int_str("B", 2) # Correctly rejected by the type checker

Specification

ParamSpec Variables

Declaration

A parameter specification variable is defined in a similar manner to how a normal type variable is defined with typing.TypeVar.

from typing import ParamSpec
P = ParamSpec("P")         # Accepted
P = ParamSpec("WrongName") # Rejected because P =/= WrongName

The runtime should accept bounds and covariant and contravariant arguments in the declaration just as typing.TypeVar does, but for now we will defer the standardization of the semantics of those options to a later PEP.

Valid use locations

Previously only a list of parameter arguments ([A, B, C]) or an ellipsis (signifying "undefined parameters") were acceptable as the first "argument" to typing.Callable. We now augment that with two new options: a parameter specification variable (Callable[P, int]) or a concatenation on a parameter specification variable (Callable[Concatenate[int, P], int]).

callable ::= Callable "[" parameters_expression, type_expression "]"
parameters_expression ::=
    | "..."
    | "[" [ type_expression ("," type_expression)* ] "]"
    | parameter_specification_variable
    | concatenate "[" type_expression ("," type_expression)* "," parameter_specification_variable "]"

where parameter_specification_variable is a typing.ParamSpec variable, declared in the manner as defined above, and concatenate is typing.Concatenate. As before, parameters_expressions by themselves are not acceptable in places where a type is expected:

def foo(x: P) -> P: ...                          # Rejected
def foo(x: Concatenate[int, P]) -> int: ...      # Rejected
def foo(x: typing.List[P]) -> None: ...          # Rejected
def foo(x: Callable[[int, str], P]) -> None: ... # Rejected

User-Defined Generic Classes

Just as defining a class as inheriting from Generic[T] makes a class generic for a single parameter (when T is a TypeVar), defining a class as inheriting from Generic[P] makes a class generic on parameters_expressions (when P is a ParamSpec).

T = TypeVar("T")
P_2 = ParamSpec("P_2")

class X(Generic[T, P]):
    f: Callable[P, int]
    x: T

def f(x: X[int, P_2]) -> str: ...                    # Accepted
def f(x: X[int, Concatenate[int, P_2]]) -> str: ...  # Accepted
def f(x: X[int, [int, bool]]) -> str: ...            # Accepted
def f(x: X[int, ...]) -> str: ...                    # Accepted
def f(x: X[int, int]) -> str: ...                    # Rejected

By the rules defined above, spelling a concrete instance of a class generic with respect to only a single ParamSpec would require unsightly double brackets. For aesthetic purposes we allow these to be omitted.

class Z(Generic[P]):
    f: Callable[P, int]

def f(x: Z[[int, str, bool]]) -> str: ... # Accepted
def f(x: Z[int, str, bool]) -> str: ...   # Equivalent

# Both Z[[int, str, bool]] and Z[int, str, bool] express this:
class Z_instantiated:
    f: Callable[[int, str, bool], int]

Semantics

The inference rules for the return type of a function invocation whose signature contains a ParamSpec variable are analogous to those around evaluating ones with TypeVars.

def changes_return_type_to_str(x: Callable[P, int]) -> Callable[P, str]: ...

def returns_int(a: str, b: bool) -> int: ...

f = changes_return_type_to_str(returns_int)
# f should have the type:
# (a: str, b: bool) -> str

f("A", True)              # Accepted
f(a="A", b=True)          # Accepted
f("A", "A")               # Rejected

expects_str(f("A", True)) # Accepted
expects_int(f("A", True)) # Rejected

Just as with traditional TypeVars, a user may include the same ParamSpec multiple times in the arguments of the same function, to indicate a dependency between multiple arguments. In these cases a type checker may choose to solve to a common behavioral supertype (i.e. a set of parameters for which all of the valid calls are valid in both of the subtypes), but is not obligated to do so.

P = ParamSpec("P")

def foo(x: Callable[P, int], y: Callable[P, int]) -> Callable[P, bool]: ...

def x_y(x: int, y: str) -> int: ...
def y_x(y: int, x: str) -> int: ...

foo(x_y, x_y) # Should return (x: int, y: str) -> bool

foo(x_y, y_x) # Could return (__a: int, __b: str) -> bool
              # This works because both callables have types that are
              # behavioral subtypes of Callable[[int, str], int]

def keyword_only_x(*, x: int) -> int: ...
def keyword_only_y(*, y: int) -> int: ...
foo(keyword_only_x, keyword_only_y) # Rejected

The constructors of user-defined classes generic on ParamSpecs should be evaluated in the same way.

U = TypeVar("U")

class Y(Generic[U, P]):
    f: Callable[P, str]
    prop: U

    def __init__(self, f: Callable[P, str], prop: U) -> None:
        self.f = f
        self.prop = prop

def a(q: int) -> str: ...

Y(a, 1)   # Should resolve to Y[(q: int), int]
Y(a, 1).f # Should resolve to (q: int) -> str

The semantics of Concatenate[X, Y, P] are that it represents the parameters represented by P with two positional-only parameters prepended. This means that we can use it to represent higher order functions that add, remove or transform a finite number of parameters of a callable.

def bar(x: int, *args: bool) -> int: ...

def add(x: Callable[P, int]) -> Callable[Concatenate[str, P], bool]: ...
add(bar)       # Should return (__a: str, x: int, *args: bool) -> bool

def remove(x: Callable[Concatenate[int, P], int]) -> Callable[P, bool]: ...
remove(bar)    # Should return (*args: bool) -> bool

def transform(
    x: Callable[Concatenate[int, P], int]
) -> Callable[Concatenate[str, P], bool]: ...
transform(bar) # Should return (__a: str, *args: bool) -> bool

This also means that while any function that returns an R can satisfy typing.Callable[P, R], only functions that can be called positionally in their first position with an X can satisfy typing.Callable[Concatenate[X, P], R].
def expects_int_first(x: Callable[Concatenate[int, P], int]) -> None: ...

@expects_int_first # Rejected
def one(x: str) -> int: ...

@expects_int_first # Rejected
def two(*, x: int) -> int: ...

@expects_int_first # Rejected
def three(**kwargs: int) -> int: ...

@expects_int_first # Accepted
def four(*args: int) -> int: ...

There are still some classes of decorators not supported with these features:

- those that add/remove/change a variable number of parameters (for example, functools.partial will remain untypable even after this PEP)
- those that add/remove/change keyword-only parameters (See Concatenating Keyword Parameters for more details).

The components of a ParamSpec

A ParamSpec captures both positional and keyword accessible parameters, but there unfortunately is no object in the runtime that captures both of these together. Instead, we are forced to separate them into *args and **kwargs, respectively. This means we need to be able to split apart a single ParamSpec into these two components, and then bring them back together into a call. To do this, we introduce P.args to represent the tuple of positional arguments in a given call and P.kwargs to represent the corresponding Mapping of keywords to values.

Valid use locations

These "properties" can only be used as the annotated types for *args and **kwargs, accessed from a ParamSpec already in scope.

def puts_p_into_scope(f: Callable[P, int]) -> None:

    def inner(*args: P.args, **kwargs: P.kwargs) -> None:    # Accepted
        pass

    def mixed_up(*args: P.kwargs, **kwargs: P.args) -> None: # Rejected
        pass

    def misplaced(x: P.args) -> None:                        # Rejected
        pass

def out_of_scope(*args: P.args, **kwargs: P.kwargs) -> None: # Rejected
    pass

Furthermore, because the default kind of parameter in Python ((x: int)) may be addressed both positionally and through its name, two valid invocations of a (*args: P.args, **kwargs: P.kwargs) function may give different partitions of the same set of parameters. Therefore, we need to make sure that these special types are only brought into the world together, and are used together, so that our usage is valid for all possible partitions.

def puts_p_into_scope(f: Callable[P, int]) -> None:

    stored_args: P.args                           # Rejected

    stored_kwargs: P.kwargs                       # Rejected

    def just_args(*args: P.args) -> None:         # Rejected
        pass

    def just_kwargs(**kwargs: P.kwargs) -> None:  # Rejected
        pass

Semantics

With those requirements met, we can now take advantage of the unique properties afforded to us by this set up:

- Inside the function, args has the type P.args, not Tuple[P.args, ...] as would be with a normal annotation (and likewise with the **kwargs)
- This special case is necessary to encapsulate the heterogeneous contents of the args/kwargs of a given call, which cannot be expressed by an indefinite tuple/dictionary type.
- A function of type Callable[P, R] can be called with (*args, **kwargs) if and only if args has the type P.args and kwargs has the type P.kwargs, and that those types both originated from the same function declaration.
- A function declared as def inner(*args: P.args, **kwargs: P.kwargs) -> X has type Callable[P, X].

With these three properties, we now have the ability to fully type check parameter preserving decorators.
def decorator(f: Callable[P, int]) -> Callable[P, None]:
    def foo(*args: P.args, **kwargs: P.kwargs) -> None:
        f(*args, **kwargs)    # Accepted, should resolve to int
        f(*kwargs, **args)    # Rejected
        f(1, *args, **kwargs) # Rejected
    return foo                # Accepted

To extend this to include Concatenate, we declare the following properties:

- A function of type Callable[Concatenate[A, B, P], R] can only be called with (a, b, *args, **kwargs) when args and kwargs are the respective components of P, a is of type A and b is of type B.
- A function declared as def inner(a: A, b: B, *args: P.args, **kwargs: P.kwargs) -> R has type Callable[Concatenate[A, B, P], R]. Placing keyword-only parameters between the *args and **kwargs is forbidden.

def add(f: Callable[P, int]) -> Callable[Concatenate[str, P], None]:
    def foo(s: str, *args: P.args, **kwargs: P.kwargs) -> None: # Accepted
        pass
    def bar(*args: P.args, s: str, **kwargs: P.kwargs) -> None: # Rejected
        pass
    return foo # Accepted

def remove(f: Callable[Concatenate[int, P], int]) -> Callable[P, None]:
    def foo(*args: P.args, **kwargs: P.kwargs) -> None:
        f(1, *args, **kwargs) # Accepted
        f(*args, 1, **kwargs) # Rejected
        f(*args, **kwargs)    # Rejected
    return foo

Note that the names of the parameters preceding the ParamSpec components are not mentioned in the resulting Concatenate. This means that these parameters can not be addressed via a named argument:

def outer(f: Callable[P, None]) -> Callable[P, None]:
    def foo(x: int, *args: P.args, **kwargs: P.kwargs) -> None:
        f(*args, **kwargs)
    def bar(*args: P.args, **kwargs: P.kwargs) -> None:
        foo(1, *args, **kwargs)   # Accepted
        foo(x=1, *args, **kwargs) # Rejected
    return bar

This is not an implementation convenience, but a soundness requirement. If we were to allow that second calling style, then the following snippet would be problematic.

@outer
def problem(*, x: object) -> None:
    pass

problem(x="uh-oh")

Inside of bar, we would get TypeError: foo() got multiple values for argument 'x'. Requiring these concatenated arguments to be addressed positionally avoids this kind of problem, and simplifies the syntax for spelling these types. Note that this is also why we have to reject signatures of the form (*args: P.args, s: str, **kwargs: P.kwargs) (See Concatenating Keyword Parameters for more details). If one of these prepended positional parameters contains a free ParamSpec, we consider that variable in scope for the purposes of extracting the components of that ParamSpec. That allows us to spell things like this:

def twice(f: Callable[P, int], *args: P.args, **kwargs: P.kwargs) -> int:
    return f(*args, **kwargs) + f(*args, **kwargs)

The type of twice in the above example is Callable[Concatenate[Callable[P, int], P], int], where P is bound by the outer Callable. This has the following semantics:

def a_int_b_str(a: int, b: str) -> int:
    pass

twice(a_int_b_str, 1, "A")     # Accepted
twice(a_int_b_str, b="A", a=1) # Accepted
twice(a_int_b_str, "A", 1)     # Rejected

Backwards Compatibility

The only changes necessary to existing features in typing are allowing these ParamSpec and Concatenate objects to be the first parameter to Callable and to be a parameter to Generic. Currently Callable expects a list of types there and Generic expects single types, so they are currently mutually exclusive. Otherwise, existing code that doesn't reference the new interfaces will be unaffected.

Reference Implementation

The Pyre type checker supports all of the behavior described above.
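For readers who want to experiment, here is an illustrative, self-contained variant of the Motivation section's add_logging example. It is synchronous, and the log_to_database call is replaced by a print purely so the sketch runs on its own; it requires Python 3.10+, where typing.ParamSpec shipped.

from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

def add_logging(f: Callable[P, R]) -> Callable[P, R]:
    # inner's signature is tied to f's via P, so a type checker validates
    # call sites of the wrapped function against f's real parameters.
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)
    return inner

@add_logging
def takes_int_str(x: int, y: str) -> int:
    return x + len(y)

takes_int_str(1, "A")   # Accepted; prints, then returns 2
# takes_int_str("B", 2) # Rejected by a type checker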
A reference implementation of the runtime components needed for those uses is provided in the pyre_extensions module. A reference implementation for CPython can be found here.

Rejected Alternatives

Using List Variadics and Map Variadics

We considered just trying to make something like this with a callback protocol which was parameterized on a list-type variadic, and a map-type variadic like so:

R = typing.TypeVar("R")
Tpositionals = ...
Tkeywords = ...
class BetterCallable(typing.Protocol[Tpositionals, Tkeywords, R]):
    def __call__(*args: Tpositionals, **kwargs: Tkeywords) -> R: ...

However, there are some problems with trying to come up with a consistent solution for those type variables for a given callable. This problem comes up with even the simplest of callables:

def simple(x: int) -> None: ...

simple <: BetterCallable[[int], [], None]
simple <: BetterCallable[[], {"x": int}, None]
BetterCallable[[int], [], None] </: BetterCallable[[], {"x": int}, None]

Any time where a type can implement a protocol in more than one way that aren't mutually compatible, we can run into situations where we lose information. If we were to make a decorator using this protocol, we would have to pick one calling convention to prefer.

def decorator(
    f: BetterCallable[[Ts], [Tmap], int],
) -> BetterCallable[[Ts], [Tmap], str]:
    def decorated(*args: Ts, **kwargs: Tmap) -> str:
        x = f(*args, **kwargs)
        return int_to_str(x)
    return decorated

@decorator
def foo(x: int) -> int: return x

reveal_type(foo) # Option A: BetterCallable[[int], {}, str]
                 # Option B: BetterCallable[[], {x: int}, str]
foo(7)   # fails under option B
foo(x=7) # fails under option A

The core problem here is that, by default, parameters in Python can either be called positionally or as a keyword argument. This means we really have three categories (positional-only, positional-or-keyword, keyword-only) we’re trying to jam into two categories. This is the same problem that we briefly mentioned when discussing .args and .kwargs. Fundamentally, in order to capture two categories when there are some things that can be in either category, we need a higher level primitive (ParamSpec) to capture all three, and then split them out afterward.

Defining ParametersOf

Another proposal we considered was defining ParametersOf and ReturnType operators which would operate on a domain of a newly defined Function type. Function would be callable with, and only with ParametersOf[F]. ParametersOf and ReturnType would only operate on type variables with precisely this bound. The combination of these three features could express everything that we can express with ParamSpecs.

F = TypeVar("F", bound=Function)

def no_change(f: F) -> F:
    def inner(
        *args: ParametersOf[F].args,
        **kwargs: ParametersOf[F].kwargs
    ) -> ReturnType[F]:
        return f(*args, **kwargs)
    return inner

def wrapping(f: F) -> Callable[ParametersOf[F], List[ReturnType[F]]]:
    def inner(
        *args: ParametersOf[F].args,
        **kwargs: ParametersOf[F].kwargs
    ) -> List[ReturnType[F]]:
        return [f(*args, **kwargs)]
    return inner

def unwrapping(
    f: Callable[ParametersOf[F], List[R]]
) -> Callable[ParametersOf[F], R]:
    def inner(
        *args: ParametersOf[F].args,
        **kwargs: ParametersOf[F].kwargs
    ) -> R:
        return f(*args, **kwargs)[0]
    return inner

We decided to go with ParamSpecs over this approach for several reasons:

- The footprint of this change would be larger, as we would need two new operators, and a new type, while ParamSpec just introduces a new variable.
- Python typing has so far avoided supporting operators, whether user-defined or built-in, in favor of destructuring. Accordingly, ParamSpec based signatures look much more like existing Python.
- The lack of user-defined operators makes common patterns hard to spell. unwrapping is odd to read because F is not actually referring to any callable. It’s just being used as a container for the parameters we wish to propagate. It would read better if we could define an operator RemoveList[List[X]] = X and then unwrapping could take F and return Callable[ParametersOf[F], RemoveList[ReturnType[F]]]. Without that, we unfortunately get into a situation where we have to use a Function-variable as an improvised ParamSpec, in that we never actually bind the return type.

In summary, between these two equivalently powerful syntaxes, ParamSpec fits much more naturally into the status quo.

Concatenating Keyword Parameters

In principle the idea of concatenation as a means to modify a finite number of positional parameters could be expanded to include keyword parameters.

def add_n(f: Callable[P, R]) -> Callable[Concatenate[("n", int), P], R]:
    def inner(*args: P.args, n: int, **kwargs: P.kwargs) -> R:
        # use n
        return f(*args, **kwargs)
    return inner

However, the key distinction is that while prepending positional-only parameters to a valid callable type always yields another valid callable type, the same cannot be said for adding keyword-only parameters. As alluded to above, the issue is name collisions. The parameters Concatenate[("n", int), P] are only valid when P itself does not already have a parameter named n.

def innocent_wrapper(f: Callable[P, R]) -> Callable[P, R]:
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        added = add_n(f)
        return added(*args, n=1, **kwargs)
    return inner

@innocent_wrapper
def problem(n: int) -> None:
    pass

Calling problem(2) works fine, but calling problem(n=2) leads to a TypeError: problem() got multiple values for argument 'n' from the call to added inside of innocent_wrapper. This kind of situation could be avoided, and this kind of decorator could be typed if we could reify the constraint that a set of parameters not contain a certain name, with something like:

P_without_n = ParamSpec("P_without_n", banned_names=["n"])

def add_n(
    f: Callable[P_without_n, R]
) -> Callable[Concatenate[("n", int), P_without_n], R]: ...

The call to add_n inside of innocent_wrapper could then be rejected since the callable was not guaranteed not to already have a parameter named n. However, enforcing these constraints would require enough additional implementation work that we judged this extension to be out of scope of this PEP. Fortunately the design of ParamSpecs is such that we can return to this idea later if there is sufficient demand.

Naming this a ParameterSpecification

We decided that ParameterSpecification was a little too long-winded for use here, and that this style of abbreviated name made it look more like TypeVar.

Naming this an ArgSpec

We think that calling this a ParamSpec is more correct than referring to it as an ArgSpec, since callables have parameters, which are distinct from the arguments which are passed to them in a given call site. A given binding for a ParamSpec is a set of function parameters, not a call-site’s arguments.

Acknowledgements

Thanks to all of the members of the Pyre team for their comments on early drafts of this PEP, and for their help with the reference implementation.
Thanks are also due to the whole Python typing community for their early feedback on this idea at a Python typing meetup, leading directly to the much more compact .args/.kwargs syntax. Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
2019-12-18T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0612/", "authors": [ "Mark Mendoza" ], "pep_number": "0612", "pandoc_version": "3.5" }
0515
PEP: 515 Title: Underscores in Numeric Literals Version: $Revision$ Last-Modified: $Date$ Author: Georg Brandl, Serhiy Storchaka Status: Final Type: Standards Track Content-Type: text/x-rst Created: 10-Feb-2016 Python-Version: 3.6 Post-History: 10-Feb-2016, 11-Feb-2016

Abstract and Rationale

This PEP proposes to extend Python's syntax and number-from-string constructors so that underscores can be used as visual separators for digit grouping purposes in integral, floating-point and complex number literals. This is a common feature of other modern languages, and can aid readability of long literals, or literals whose value should clearly separate into parts, such as bytes or words in hexadecimal notation. Examples:

# grouping decimal numbers by thousands
amount = 10_000_000.0

# grouping hexadecimal addresses by words
addr = 0xCAFE_F00D

# grouping bits into nibbles in a binary literal
flags = 0b_0011_1111_0100_1110

# same, for string conversions
flags = int('0b_1111_0000', 2)

Specification

The current proposal is to allow one underscore between digits, and after base specifiers in numeric literals. The underscores have no semantic meaning, and literals are parsed as if the underscores were absent.

Literal Grammar

The production list for integer literals would therefore look like this:

integer: decinteger | bininteger | octinteger | hexinteger
decinteger: nonzerodigit (["_"] digit)* | "0" (["_"] "0")*
bininteger: "0" ("b" | "B") (["_"] bindigit)+
octinteger: "0" ("o" | "O") (["_"] octdigit)+
hexinteger: "0" ("x" | "X") (["_"] hexdigit)+
nonzerodigit: "1"..."9"
digit: "0"..."9"
bindigit: "0" | "1"
octdigit: "0"..."7"
hexdigit: digit | "a"..."f" | "A"..."F"

For floating-point and complex literals:

floatnumber: pointfloat | exponentfloat
pointfloat: [digitpart] fraction | digitpart "."
exponentfloat: (digitpart | pointfloat) exponent
digitpart: digit (["_"] digit)*
fraction: "." digitpart
exponent: ("e" | "E") ["+" | "-"] digitpart
imagnumber: (floatnumber | digitpart) ("j" | "J")

Constructors

Following the same rules for placement, underscores will be allowed in the following constructors:

- int() (with any base)
- float()
- complex()
- Decimal()

Further changes

The new-style number-to-string formatting language will be extended to allow _ as a thousands separator, where currently only , is supported. This can be used to easily generate code with more readable literals.[1] The syntax would be the same as for the comma, e.g. {:10_} for a width of 10 with _ separator. For the b, x and o format specifiers, _ will be allowed and group every 4 digits.

Prior Art

Those languages that do allow underscore grouping implement a large variety of rules for allowed placement of underscores. In cases where the language spec contradicts the actual behavior, the actual behavior is listed. ("single" or "multiple" refer to allowing runs of consecutive underscores.)
- Ada: single, only between digits[2]
- C# (open proposal for 7.0): multiple, only between digits[3]
- C++14: single, between digits (different separator chosen)[4]
- D: multiple, anywhere, including trailing[5]
- Java: multiple, only between digits[6]
- Julia: single, only between digits (but not in float exponent parts)[7]
- Perl 5: multiple, basically anywhere, although docs say it's restricted to one underscore between digits[8]
- Ruby: single, only between digits (although docs say "anywhere")[9]
- Rust: multiple, anywhere, except for between exponent "e" and digits[10]
- Swift: multiple, between digits and trailing (although textual description says only "between digits")[11]

Alternative Syntax

Underscore Placement Rules

Instead of the relatively strict rule specified above, the use of underscores could be less limited. As seen in other languages, common rules include:

- Only one consecutive underscore allowed, and only between digits.
- Multiple consecutive underscores allowed, but only between digits.
- Multiple consecutive underscores allowed, in most positions except for the start of the literal, or special positions like after a decimal point.

The syntax in this PEP has ultimately been selected because it covers the common use cases, and does not allow for syntax that would have to be discouraged in style guides anyway. A less common rule would be to allow underscores only every N digits (where N could be 3 for decimal literals, or 4 for hexadecimal ones). This is unnecessarily restrictive, especially considering the separator placement is different in different cultures.

Different Separators

A proposed alternate syntax was to use whitespace for grouping. Although strings are a precedent for combining adjoining literals, the behavior can lead to unexpected effects which are not possible with underscores. Also, no other language is known to use this rule, except for languages that generally disregard any whitespace. C++14 introduces apostrophes for grouping (because underscores introduce ambiguity with user-defined literals), which is not considered because of the use in Python's string literals.[12]

Implementation

A preliminary patch that implements the specification given above has been posted to the issue tracker.[13]

References

Copyright

This document has been placed in the public domain.

[1] https://mail.python.org/pipermail/python-dev/2016-February/143283.html
[2] http://archive.adaic.com/standards/83lrm/html/lrm-02-04.html#2.4
[3] https://github.com/dotnet/roslyn/issues/216
[4] http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3499.html
[5] https://dlang.org/spec/lex.html#integerliteral
[6] https://docs.oracle.com/javase/7/docs/technotes/guides/language/underscores-literals.html
[7] https://web.archive.org/web/20160223175334/http://docs.julialang.org/en/release-0.4/manual/integers-and-floating-point-numbers/
[8] https://perldoc.perl.org/perldata#Scalar-value-constructors
[9] https://ruby-doc.org/core-2.3.0/doc/syntax/literals_rdoc.html#label-Numbers
[10] https://web.archive.org/web/20160304121349/http://doc.rust-lang.org/reference.html#integer-literals
[11] https://docs.swift.org/swift-book/ReferenceManual/LexicalStructure.html
[12] http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3499.html
[13] http://bugs.python.org/issue26331
2016-02-10T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0515/", "authors": [ "Georg Brandl", "Serhiy Storchaka" ], "pep_number": "0515", "pandoc_version": "3.5" }
0328
PEP: 328 Title: Imports: Multi-Line and Absolute/Relative Version: $Revision$ Last-Modified: $Date$ Author: Aahz <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 21-Dec-2003 Python-Version: 2.4, 2.5, 2.6 Post-History: 08-Mar-2004 Abstract The import statement has two problems: - Long import statements can be difficult to write, requiring various contortions to fit Pythonic style guidelines. - Imports can be ambiguous in the face of packages; within a package, it's not clear whether import foo refers to a module within the package or some module outside the package. (More precisely, a local module or package can shadow another hanging directly off sys.path.) For the first problem, it is proposed that parentheses be permitted to enclose multiple names, thus allowing Python's standard mechanisms for multi-line values to apply. For the second problem, it is proposed that all import statements be absolute by default (searching sys.path only) with special syntax (leading dots) for accessing package-relative imports. Timeline In Python 2.5, you must enable the new absolute import behavior with : from __future__ import absolute_import You may use relative imports freely. In Python 2.6, any import statement that results in an intra-package import will raise DeprecationWarning (this also applies to from <> import that fails to use the relative import syntax). Rationale for Parentheses Currently, if you want to import a lot of names from a module or package, you have to choose one of several unpalatable options: - Write a long line with backslash continuations: from Tkinter import Tk, Frame, Button, Entry, Canvas, Text, \ LEFT, DISABLED, NORMAL, RIDGE, END - Write multiple import statements: from Tkinter import Tk, Frame, Button, Entry, Canvas, Text from Tkinter import LEFT, DISABLED, NORMAL, RIDGE, END (import * is not an option ;-) Instead, it should be possible to use Python's standard grouping mechanism (parentheses) to write the import statement: from Tkinter import (Tk, Frame, Button, Entry, Canvas, Text, LEFT, DISABLED, NORMAL, RIDGE, END) This part of the proposal had BDFL approval from the beginning. Parentheses support was added to Python 2.4. Rationale for Absolute Imports In Python 2.4 and earlier, if you're reading a module located inside a package, it is not clear whether : import foo refers to a top-level module or to another module inside the package. As Python's library expands, more and more existing package internal modules suddenly shadow standard library modules by accident. It's a particularly difficult problem inside packages because there's no way to specify which module is meant. To resolve the ambiguity, it is proposed that foo will always be a module or package reachable from sys.path. This is called an absolute import. The python-dev community chose absolute imports as the default because they're the more common use case and because absolute imports can provide all the functionality of relative (intra-package) imports -- albeit at the cost of difficulty when renaming package pieces higher up in the hierarchy or when moving one package inside another. Because this represents a change in semantics, absolute imports will be optional in Python 2.5 and 2.6 through the use of : from __future__ import absolute_import This part of the proposal had BDFL approval from the beginning. Rationale for Relative Imports With the shift to absolute imports, the question arose whether relative imports should be allowed at all. 
Several use cases were presented, the most important of which is being able to rearrange the structure of large packages without having to edit sub-packages. In addition, a module inside a package can't easily import itself without relative imports. Guido approved of the idea of relative imports, but there has been a lot of disagreement on the spelling (syntax). There does seem to be agreement that relative imports will require listing specific names to import (that is, import foo as a bare term will always be an absolute import). Here are the contenders: - One from Guido: from .foo import bar and : from ...foo import bar These two forms have a couple of different suggested semantics. One semantic is to make each dot represent one level. There have been many complaints about the difficulty of counting dots. Another option is to only allow one level of relative import. That misses a lot of functionality, and people still complained about missing the dot in the one-dot form. The final option is to define an algorithm for finding relative modules and packages; the objection here is "Explicit is better than implicit". (The algorithm proposed is "search up from current package directory until the ultimate package parent gets hit".) Some people have suggested other punctuation as the separator, such as "-" or "^". Some people have suggested using "*": from *.foo import bar - The next set of options is conflated from several posters: from __pkg__.__pkg__ import and : from .__parent__.__parent__ import Many people (Guido included) think these look ugly, but they are clear and explicit. Overall, more people prefer __pkg__ as the shorter option. - One suggestion was to allow only sibling references. In other words, you would not be able to use relative imports to refer to modules higher in the package tree. You would then be able to do either : from .spam import eggs or : import .spam.eggs - Some people favor allowing indexed parents: from -2.spam import eggs In this scenario, importing from the current directory would be a simple : from .spam import eggs - Finally, some people dislike the way you have to change import to from ... import when you want to dig inside a package. They suggest completely rewriting the import syntax: from MODULE import NAMES as RENAME searching HOW or : import NAMES as RENAME from MODULE searching HOW [from NAMES] [in WHERE] import ... However, this most likely could not be implemented for Python 2.5 (too big a change), and allowing relative imports is sufficiently critical that we need something now (given that the standard import will change to absolute import). More than that, this proposed syntax has several open questions: - What is the precise proposed syntax? (Which clauses are optional under which circumstances?) - How strongly does the searching clause bind? In other words, do you write: import foo as bar searching XXX, spam as ham searching XXX or: import foo as bar, spam as ham searching XXX Guido's Decision Guido has Pronounced[1] that relative imports will use leading dots. A single leading dot indicates a relative import, starting with the current package. Two or more leading dots give a relative import to the parent(s) of the current package, one level per dot after the first. 
Here's a sample package layout:

package/
    __init__.py
    subpackage1/
        __init__.py
        moduleX.py
        moduleY.py
    subpackage2/
        __init__.py
        moduleZ.py
    moduleA.py

Assuming that the current file is either moduleX.py or subpackage1/__init__.py, following are correct usages of the new syntax:

from .moduleY import spam
from .moduleY import spam as ham
from . import moduleY
from ..subpackage1 import moduleY
from ..subpackage2.moduleZ import eggs
from ..moduleA import foo
from ...package import bar
from ...sys import path

Note that while that last case is legal, it is certainly discouraged ("insane" was the word Guido used). Relative imports must always use from <> import; import <> is always absolute. Of course, absolute imports can use from <> import by omitting the leading dots. The reason import .foo is prohibited is because after:

import XXX.YYY.ZZZ

then:

XXX.YYY.ZZZ

is usable in an expression. But:

.moduleY

is not usable in an expression.

Relative Imports and __name__

Relative imports use a module's __name__ attribute to determine that module's position in the package hierarchy. If the module's name does not contain any package information (e.g. it is set to '__main__') then relative imports are resolved as if the module were a top level module, regardless of where the module is actually located on the file system.

Relative Imports and Indirection Entries in sys.modules

When packages were introduced, the concept of an indirection entry in sys.modules came into existence[2]. When an entry in sys.modules for a module within a package had a value of None, it represented that the module actually referenced the top-level module. For instance, 'Sound.Effects.string' might have a value of None in sys.modules. That meant any import that resolved to that name actually was to import the top-level 'string' module. This introduced an optimization for when a relative import was meant to resolve to an absolute import. But since this PEP makes a very clear delineation between absolute and relative imports, this optimization is no longer needed. When absolute/relative imports become the only import semantics available then indirection entries in sys.modules will no longer be supported.

References

For more background, see the following python-dev threads:

- Re: Christmas Wishlist
- Re: Python-Dev Digest, Vol 5, Issue 57
- Relative import
- Another Strategy for Relative Import

Copyright

This document has been placed in the public domain.

[1] https://mail.python.org/pipermail/python-dev/2004-March/043739.html
[2] https://www.python.org/doc/essays/packages/
2003-12-21T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0328/", "authors": [ "Aahz" ], "pep_number": "0328", "pandoc_version": "3.5" }
0281
PEP: 281 Title: Loop Counter Iteration with range and xrange Author: Magnus Lie Hetland <[email protected]> Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 11-Feb-2002 Python-Version: 2.3 Post-History:

Abstract

This PEP describes yet another way of exposing the loop counter in for-loops. It basically proposes that the functionality of the function indices() from PEP 212 be included in the existing functions range() and xrange().

Pronouncement

In commenting on PEP 279's enumerate() function, this PEP's author offered, "I'm quite happy to have it make PEP 281 obsolete." Subsequently, PEP 279 was accepted into Python 2.3. On 17 June 2005, the BDFL concurred with it being obsolete and hereby rejected the PEP. For the record, he found some of the examples to be somewhat jarring in appearance:

>>> range(range(5), range(10), range(2))
[5, 7, 9]

Motivation

It is often desirable to loop over the indices of a sequence. PEP 212 describes several ways of doing this, including adding a built-in function called indices, conceptually defined as:

def indices(sequence):
    return range(len(sequence))

On the assumption that adding functionality to an existing built-in function may be less intrusive than adding a new built-in function, this PEP proposes adding this functionality to the existing functions range() and xrange().

Specification

It is proposed that all three arguments to the built-in functions range() and xrange() are allowed to be objects with a length (i.e. objects implementing the __len__ method). If an argument cannot be interpreted as an integer (i.e. it has no __int__ method), its length will be used instead. Examples:

>>> range(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> range(range(5), range(10))
[5, 6, 7, 8, 9]
>>> range(range(5), range(10), range(2))
[5, 7, 9]
>>> list(xrange(range(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(xrange(xrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

# Number the lines of a file:
lines = file.readlines()
for num in range(lines):
    print num, lines[num]

Alternatives

A natural alternative to the above specification is allowing xrange() to access its arguments in a lazy manner. Thus, instead of using their length explicitly, xrange can return one index for each element of the stop argument until the end is reached. A similar lazy treatment makes little sense for the start and step arguments since their length must be calculated before iteration can begin. (Actually, the length of the step argument isn't needed until the second element is returned.) A pseudo-implementation (using only the stop argument, and assuming that it is iterable) is:

def xrange(stop):
    i = 0
    for x in stop:
        yield i
        i += 1

Testing whether to use int() or lazy iteration could be done by checking for an __iter__ attribute. (This example assumes the presence of generators, but could easily have been implemented as a plain iterator object.) It may be questionable whether this feature is truly useful, since one would not be able to access the elements of the iterable object inside the for loop through indexing. Example:

# Printing the numbers of the lines of a file:
for num in range(file):
    print num # The line itself is not accessible

A more controversial alternative (to deal with this) would be to let range() behave like the function irange() of PEP 212 when supplied with a sequence.
Example:

>>> range(5)
[0, 1, 2, 3, 4]
>>> range('abcde')
[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd'), (4, 'e')]

Backwards Compatibility

The proposal could cause backwards incompatibilities if arguments are used which implement both __int__ and __len__ (or __iter__ in the case of lazy iteration with xrange). The author does not believe that this is a significant problem.

Copyright

This document has been placed in the public domain.
2002-02-11T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0281/", "authors": [ "Magnus Lie Hetland" ], "pep_number": "0281", "pandoc_version": "3.5" }
0600
PEP: 600 Title: Future 'manylinux' Platform Tags for Portable Linux Built Distributions Version: $Revision$ Last-Modified: $Date$ Author: Nathaniel J. Smith <[email protected]>, Thomas Kluyver <[email protected]> Sponsor: Paul Moore <[email protected]> BDFL-Delegate: Paul Moore <[email protected]> Discussions-To: https://discuss.python.org/t/the-next-manylinux-specification/1043 Status: Final Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 03-May-2019 Post-History: 03-May-2019 Replaces: 513, 571, 599 Resolution: https://discuss.python.org/t/pep-600-future-manylinux-platform-tags-for-portable-linux-built-distributions/2414/27 Abstract This PEP proposes a scheme for new 'manylinux' wheel tags to be defined without requiring a PEP for every specific tag, similar to how Windows and macOS tags already work. This will allow package maintainers to take advantage of new tags more quickly, while making better use of limited volunteer time. Non-goals include: handling non-glibc-based platforms; integrating with external package managers or handling external dependencies such as CUDA; making manylinux tags more sophisticated than their Windows/macOS equivalents; doing anything besides taking our existing tried-and-tested approach and streamlining it. These are important issues and other PEPs may address them in the future, but for this PEP they're out of scope. Rationale Python users appreciate it when PyPI has pre-compiled packages for their platform, because it makes installation fast and simple. But distributing pre-compiled binaries on Linux is challenging because of the diversity of Linux-based platforms. For example, Debian, Android, and Alpine all use the Linux kernel, but with radically different userspace libraries, which makes it difficult or impossible to create a single wheel that works on all three. This complexity has caused many previous discussions of Linux wheels to stall out. The "manylinux" project succeeded by adopting a strategy of ruthless pragmatism. We chose a large but tractable set of Linux platforms – specifically, mainstream glibc-based distributions like Debian, OpenSuSE, Ubuntu, RHEL, etc. – and then we did whatever it takes to make wheels that work across all these platforms. This approach requires many compromises. Manylinux wheels can only rely on external libraries that maintain a consistent ABI and are universally available across all these distributions, which in practice restricts them to a small set of core libraries like glibc and a few others. Wheels have to be built on carefully-chosen platforms of the oldest possible vintage, using a Python that is itself built in a carefully-chosen configuration. Other shared library dependencies have to be bundled into the wheel, which requires a complex process to avoid collisions between unrelated wheels. And finally, the details of these requirements change over time, as new distro versions are released, and old ones fall out of use. It turns out that these requirements are not too onerous: they're essentially equivalent to what you have to do to ship Windows or macOS wheels, and the manylinux approach has achieved substantial uptake among both package maintainers and end-users. But any manylinux PEP needs some way to address these complexities. In previous manylinux PEPs (PEP 513, PEP 571, PEP 599), we've done this by attempting to write down in the PEP the exact set of libraries, symbol versions, Python configuration, etc. 
In previous manylinux PEPs (PEP 513, PEP 571, PEP 599), we've done this by attempting to write down in the PEP the exact set of libraries, symbol versions, Python configuration, etc. that we believed would lead to wheels that work on all mainstream glibc-based Linux systems. But this created several problems:

First, PEPs are generally supposed to be normative references: if software doesn't match the PEP, then we fix the software. But in this case, the PEPs are attempting to describe Linux distributions, which are a moving target and which do not consider our PEPs to constrain their behavior. This means that we've been taking on an unbounded commitment to keep updating every manylinux PEP whenever the Linux distro landscape changes. This is a substantial commitment for unfunded volunteers to take on, and it's not clear that this work produces value for our users.

And second, every time we move manylinux forward to a newer range of supported platforms, or add support for a new architecture, we have to go through a fairly elaborate process: writing a new PEP, updating the PyPI and pip codebases to recognize the new tag, waiting for the new pip to percolate to users, etc. None of this happens on Windows/macOS; it's only a tax on Linux maintainers. This slows deployment of new manylinux versions, and consumes part of our community's limited PEP review bandwidth, thus slowing progress of the Python packaging ecosystem as a whole. This is especially problematic for less-popular architectures, which have fewer volunteer resources to overcome these barriers.

How can we fix it?

A manylinux PEP has to address three main audiences:

- Package installers, like pip, need to be able to determine which wheel tags are compatible with the system they find themselves running on. This requires some automated process to introspect the system and match it up with wheel tags.
- Package indexes, like PyPI, need to be able to validate which wheel tags are valid. Generally, this just requires something like a list of valid tags, or a regex they match, with no need to know anything about the actual semantics of individual tags. (But see the discussion of upload verification below.)
- Package maintainers need to be able to build wheels that meet the requirements for a given wheel tag.

Here's the key insight behind this new PEP: it's crucial that different package installers and package indexes all agree on which manylinux tags are valid and which systems they install on, so we need a PEP to specify these – but these parts are straightforward, and don't really change between manylinux versions. The complicated part that keeps changing is the process of actually building the wheels – but, if there are multiple competing build environments, it doesn't matter whether they use exactly the same rules as each other, as long as they all produce wheels that work on end-user systems. Therefore, we don't need an interoperability standard for building wheels, so we don't need to write the details into a PEP.

To further convince ourselves that this approach will work, let's look again at how we handle wheels on Windows and macOS: the PEPs describe which tags are valid, and which systems they're supposed to work on, but not how to actually build wheels for those platforms. And in practice, if you want to distribute Windows or macOS wheels, you might have to jump through some complicated and poorly documented hoops in order to bundle dependencies, target the right range of OS versions, etc.
But the system works, and the way to improve it is to write better docs and build better tooling; no-one thinks that the way to make Windows wheels work better is to publish a PEP describing which symbols we think Microsoft should be including in their libraries and how their linker ought to work. This PEP extends that philosophy to manylinux as well.

Specification

Core definition

Tags using the new scheme will look like:

    manylinux_2_17_x86_64

Or more generally:

    manylinux_${GLIBCMAJOR}_${GLIBCMINOR}_${ARCH}

This tag is a promise: the wheel's creator promises that the wheel will work on any mainstream Linux distro that uses glibc version ${GLIBCMAJOR}.${GLIBCMINOR} or later, and where the ${ARCH} matches the return value from distutils.util.get_platform(). (For more detail about architecture tags, see PEP 425.)

If a user installs this wheel into an environment that matches these requirements and it doesn't work, then that wheel does not comply with this specification. This should be considered a bug in the wheel, and it's the wheel creator's responsibility to look for a fix (possibly with the help of the broader community).

The word "mainstream" is intentionally somewhat vague, and should be interpreted expansively. The goal is to rule out weird homebrew Linux systems; generally any distro you've actually heard of should be considered "mainstream". We also provide a way for maintainers of "weird" distros to manually override this check, though based on experience with previous manylinux PEPs, we don't expect this feature to see much use.

And finally, compliant wheels are required to "play well with others", i.e., installing a manylinux wheel must not cause other unrelated packages to break.

Any method of producing wheels which meets these criteria is acceptable. However, in practice we expect that the auditwheel project will maintain an up-to-date set of tools and build images for producing manylinux wheels, as well as documentation about how they work and how to use them, and that most maintainers will want to use those. For the latest information on building manylinux wheels, including recommendations about which build images to use, see https://packaging.python.org.

Since these requirements are fairly high-level, here are some examples of how they play out in specific situations:

Example: if a wheel is tagged as manylinux_2_17_x86_64, but it uses symbols that were only added in glibc 2.18, then that wheel won't work on systems with glibc 2.17. Therefore, we can conclude that this wheel is in violation of this specification.

Example: Until ~2017, all major Linux distros included libncursesw.so.5 as part of their default install. Until that date, a wheel that linked to libncursesw.so.5 was compliant with this specification. Then, distros started switching to ncurses 6, which has a different name and incompatible ABI, and stopped installing libncursesw.so.5 by default. So after that date, a wheel that linked to libncursesw.so.5 was no longer compliant with this specification.

Example: The Linux ELF linker places all shared library SONAMEs into a single process-global namespace. If independent wheels used the same SONAME for their bundled libraries, they might end up colliding and using the wrong library version, which would violate the "play well with others" rule. Therefore, this specification requires that wheels use globally-unique names for all bundled libraries. (Auditwheel currently accomplishes this by renaming all bundled libraries to include a globally-unique hash.)
Example: we've observed certain wheels using C++ in ways that interfere with other packages via an unclear mechanism. This is also a violation of the "play well with others" rule, so those wheels aren't compliant with this specification.

Example: The imaginary architecture LEG v7 has both big-endian and little-endian variants. Big-endian binaries require a big-endian system, and little-endian binaries require a little-endian system. But unfortunately, it's discovered that due to a bug in PEP 425, both variants use the same architecture tag, legv7. This makes it impossible to create a compliant manylinux_2_17_legv7 wheel: no matter what we do, it will crash on some users' systems. So, we write a new PEP defining architecture tags legv7le and legv7be; now we can ship manylinux LEG v7 wheels.

Example: There's also a LEG v8. It also has big-endian and little-endian variants. But fortunately, it turns out that PEP 425 already does the right thing for LEG v8, so LEG v8 enthusiasts can start shipping manylinux_2_17_legv8le and manylinux_2_17_legv8be wheels immediately once this PEP is implemented, even though the authors of this PEP don't know anything at all about LEG v8.

Legacy manylinux tags

The existing manylinux tags are redefined as aliases for new-style tags:

- manylinux1_x86_64 is now an alias for manylinux_2_5_x86_64
- manylinux1_i686 is now an alias for manylinux_2_5_i686
- manylinux2010_x86_64 is now an alias for manylinux_2_12_x86_64
- manylinux2010_i686 is now an alias for manylinux_2_12_i686
- manylinux2014_x86_64 is now an alias for manylinux_2_17_x86_64
- manylinux2014_i686 is now an alias for manylinux_2_17_i686
- manylinux2014_aarch64 is now an alias for manylinux_2_17_aarch64
- manylinux2014_armv7l is now an alias for manylinux_2_17_armv7l
- manylinux2014_ppc64 is now an alias for manylinux_2_17_ppc64
- manylinux2014_ppc64le is now an alias for manylinux_2_17_ppc64le
- manylinux2014_s390x is now an alias for manylinux_2_17_s390x

This redefinition is largely a no-op, but does affect a few things:

- Previously, we had an open-ended and growing commitment to keep updating every manylinux PEP whenever a new Linux distro was released, for the rest of time. By making this PEP normative for the older tags, that obligation goes away. When this PEP is accepted, the previous manylinux PEPs will receive a final update noting that they are no longer maintained and referring to this PEP.
- The "play well with others" rule was always intended, but previous PEPs didn't state it explicitly; now it's explicit.
- Previous PEPs assumed that glibc 3.x might be incompatible with glibc 2.x, so we checked for compatibility between a system and a tag using logic like:

      sys_major == tag_major and sys_minor >= tag_minor

  Recently the glibc maintainers advised us that we should assume that glibc will maintain backwards-compatibility indefinitely, even if they bump the major version number. So the new check for compatibility is:

      (sys_major, sys_minor) >= (tag_major, tag_minor)

Package installers

Generally, package installers should install manylinux wheels on systems that have an appropriate glibc and architecture, and not otherwise. If there are multiple compatible manylinux wheels available, then the wheel with the highest glibc version should be preferred, in order to take advantage of newer compilers and glibc features.
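To make the installer-side check concrete, here is a minimal sketch of one way to detect the system glibc version, using the gnu_get_libc_version() routine that glibc itself exports (the same technique the earlier manylinux PEPs relied on). The helper name matches the get_system_glibc_version() placeholder used in the reference code below; on a non-glibc system (e.g. a musl-based distro) the symbol lookup fails, which corresponds to system_uses_glibc() returning False:

    import ctypes

    def get_system_glibc_version():
        # Look up gnu_get_libc_version() in the C library already loaded
        # into this process; this entry point is glibc-specific.
        libc = ctypes.CDLL(None)
        gnu_get_libc_version = libc.gnu_get_libc_version
        gnu_get_libc_version.restype = ctypes.c_char_p
        version = gnu_get_libc_version().decode("ascii")  # e.g. "2.17"
        major, minor = version.split(".")[:2]
        return int(major), int(minor)

    # Per this PEP, a manylinux_2_17 wheel is installable when
    # (sys_major, sys_minor) >= (2, 17).
    print(get_system_glibc_version() >= (2, 17))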
In addition, we follow previous specifications, and allow Python distributors to manually override this check by adding a _manylinux module to their standard library. If this module is importable, and if it defines a function called manylinux_compatible, then package installers should call this function, passing in the major version, minor version, and architecture from the manylinux tag; it will either return a boolean saying whether wheels with the given tag should be considered compatible with the current system, or else None to indicate that the default logic should be used.

For compatibility with previous specifications: if the tag is manylinux1 or manylinux_2_5 exactly, then we also check the module for a boolean attribute manylinux1_compatible; if the tag is manylinux2010 or manylinux_2_12 exactly, then we also check the module for a boolean attribute manylinux2010_compatible; and if the tag is manylinux2014 or manylinux_2_17 exactly, then we also check the module for a boolean attribute manylinux2014_compatible. If both the new and old attributes are defined, then manylinux_compatible takes precedence.

Here's some example code. You don't have to actually use this code, but you can use it for reference if you have questions about the exact semantics:

    import re

    LEGACY_ALIASES = {
        "manylinux1_x86_64": "manylinux_2_5_x86_64",
        "manylinux1_i686": "manylinux_2_5_i686",
        "manylinux2010_x86_64": "manylinux_2_12_x86_64",
        "manylinux2010_i686": "manylinux_2_12_i686",
        "manylinux2014_x86_64": "manylinux_2_17_x86_64",
        "manylinux2014_i686": "manylinux_2_17_i686",
        "manylinux2014_aarch64": "manylinux_2_17_aarch64",
        "manylinux2014_armv7l": "manylinux_2_17_armv7l",
        "manylinux2014_ppc64": "manylinux_2_17_ppc64",
        "manylinux2014_ppc64le": "manylinux_2_17_ppc64le",
        "manylinux2014_s390x": "manylinux_2_17_s390x",
    }

    def manylinux_tag_is_compatible_with_this_system(tag):
        # Normalize and parse the tag
        tag = LEGACY_ALIASES.get(tag, tag)
        m = re.match("manylinux_([0-9]+)_([0-9]+)_(.*)", tag)
        if not m:
            return False
        tag_major_str, tag_minor_str, tag_arch = m.groups()
        tag_major = int(tag_major_str)
        tag_minor = int(tag_minor_str)

        if not system_uses_glibc():
            return False
        sys_major, sys_minor = get_system_glibc_version()
        if (sys_major, sys_minor) < (tag_major, tag_minor):
            return False
        sys_arch = get_system_arch()
        if sys_arch != tag_arch:
            return False

        # Check for manual override
        try:
            import _manylinux
        except ImportError:
            pass
        else:
            if hasattr(_manylinux, "manylinux_compatible"):
                result = _manylinux.manylinux_compatible(
                    tag_major, tag_minor, tag_arch,
                )
                if result is not None:
                    return bool(result)
            else:
                if (tag_major, tag_minor) == (2, 5):
                    if hasattr(_manylinux, "manylinux1_compatible"):
                        return bool(_manylinux.manylinux1_compatible)
                if (tag_major, tag_minor) == (2, 12):
                    if hasattr(_manylinux, "manylinux2010_compatible"):
                        return bool(_manylinux.manylinux2010_compatible)
                if (tag_major, tag_minor) == (2, 17):
                    if hasattr(_manylinux, "manylinux2014_compatible"):
                        return bool(_manylinux.manylinux2014_compatible)

        return True

Package indexes

The exact set of wheel tags accepted by PyPI, or any package index, is a policy question, and up to the maintainers of that index. But we recommend that package indexes accept any wheels whose platform tag matches the following regexes:

- manylinux1_(x86_64|i686)
- manylinux2010_(x86_64|i686)
- manylinux2014_(x86_64|i686|aarch64|armv7l|ppc64|ppc64le|s390x)
- manylinux_[0-9]+_[0-9]+_(.*)

Package indexes may impose additional requirements; for example, they might audit uploaded wheels and reject those that contain known problems, such as a manylinux_2_17 wheel that references symbols from later glibc versions, or dependencies on external libraries that are known not to exist on all systems. Or a package index might decide to be conservative and reject wheels tagged manylinux_2_999, on the grounds that no-one knows what the Linux distro landscape will look like when glibc 2.999 is released. We leave the details of any such checks to the discretion of the package index maintainers.
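As a rough illustration, the recommended index-side acceptance check can be applied mechanically; this sketch is illustrative, not a prescribed PyPI implementation, and the function name is invented for the example:

    import re

    ACCEPTED_PLATFORM_TAG_PATTERNS = [
        r"manylinux1_(x86_64|i686)",
        r"manylinux2010_(x86_64|i686)",
        r"manylinux2014_(x86_64|i686|aarch64|armv7l|ppc64|ppc64le|s390x)",
        r"manylinux_[0-9]+_[0-9]+_(.*)",
    ]

    def index_accepts(platform_tag):
        # Accept a wheel's platform tag if it fully matches any of the
        # recommended patterns above.
        return any(
            re.fullmatch(pattern, platform_tag)
            for pattern in ACCEPTED_PLATFORM_TAG_PATTERNS
        )

    assert index_accepts("manylinux2014_armv7l")
    assert index_accepts("manylinux_2_24_aarch64")
    assert not index_accepts("manylinux2010_aarch64")  # never defined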
Rejected alternatives

Continuing the manylinux20XX series: As discussed above, this leads to much more effort-intensive, slower, and more complex rollouts of new versions. And while there are two places where it seems at first to have some compensating benefits, if you look more closely this turns out not to be the case.

First, this forces us to produce human-readable descriptions of how Linux distros work, in the text of the PEP. But this is less valuable than it might seem at first, and can actually be handled better by the new "perennial" approach anyway. If you're trying to build wheels, the main thing you need is a tutorial on how to use the build images and the tooling around them. If you're trying to add support for a new build profile or create a competitor to auditwheel, then your best resources will be the auditwheel source code and issue tracker, which are always going to be more detailed, precise, and reliable than a summary spec written in English and without tests. Documentation like the old manylinux20XX PEPs does add value! But in both cases, it's primarily as a secondary reference to provide overview and context.

And furthermore, the PEP process is poorly suited to maintaining this kind of reference documentation – there's a reason we don't keep the pip user manual in the PEPs repository! The auditwheel maintainers are best situated to understand what kinds of documentation are useful to their users, and to maintain that documentation over time. For example, there's substantial overlap between the different manylinux versions, and the PEP process currently forces us to handle this by copy-pasting everything between a growing list of documents; instead, the auditwheel maintainers might choose to factor out the common parts into a single piece of shared documentation.

A related concern was that with the perennial approach, it may become harder for package maintainers to decide which build profile to target: instead of having to pick between manylinux1, manylinux2010, manylinux2014, ..., they now have a wider array of options like manylinux_2_5, manylinux_2_6, ..., manylinux_2_20, ... But again, we don't believe this will be a problem in practice. In either system, most package maintainers won't be starting by reading PEPs and trying to implement them from scratch. If you're a particularly expert and ambitious package maintainer who needs to target a new version or a new architecture, the perennial approach gives you additional flexibility. But for regular everyday maintainers, we expect they'll start from a tutorial like packaging.python.org, and choose from existing build images. A tutorial can just as easily recommend manylinux_2_17 as it can manylinux2014, and we expect the actual set of pre-provided build images to be identical in both cases. And again, by maintaining this documentation in the right place, instead of trying to do it in the PEPs repository, we expect that we'll end up with documentation that's higher-quality and better fitted to its purpose.

Finally, some participants have pointed out that it's very nice to be able to look at a wheel and tell definitively whether it meets the requirements of the spec.
With the new "perennial" approach, we can never say with 100% certainty that a wheel does meet the spec, because that depends on the Linux distros. As engineers we have a well-justified dislike for that kind of uncertainty. However: as demonstrated by the examples above, we can still tell definitively when a wheel doesn't meet the spec, which turns out to be what's important in practice. And, in practice, with the manylinux20XX approach, whenever distros change, we actually change the spec; it takes a bit longer. So even if a wheel was compliant today, it might be become non-compliant tomorrow. This is frustrating, but unfortunately this uncertainty is unavoidable if what you care about is distributing working wheels to users. So even on these points where the old approach initially seems to have advantages, we expect the new approach to actually do as well or better. Switching to perennial tags, but continuing to write a PEP for each version: This was proposed as a kind of hybrid, to try to get some of the advantages of the perennial tagging system – like easier rollouts of new versions – while keeping the advantages of the manylinux20XX scheme, like forcing us to write documentation about Linux distros, simplifying options for package maintainers, and being able to definitively tell when a wheel meets the spec. But as discussed above, on a closer look, it turns out that these advantages are largely illusory. And this also inherits significant disadvantages from the manylinux20XX scheme, like creating indefinite obligations to update a growing list of copy-pasted PEPs. Making auditwheel normative: Another possibility that was considered was to make auditwheel the normative reference on the definition of manylinux, i.e., a wheel would be compliant if and only if auditwheel check completed without errors. This was rejected because the point of packaging PEPs is to define interoperability between tools, not to bless specific tools. Adding extra words to the tag string: Another proposal we considered was to add extra words to the wheel tag, e.g. manylinux_glibc_2_17 instead of manylinux_2_17. The motivation would be to leave the door open to other kinds of versioning heuristics in the future – for example, we could have manylinux_glibc_$VERSION and manylinux_alpine_$VERSION. But "manylinux" has always been a synonym for "broad compatibility with mainstream glibc-based distros"; reusing it for unrelated build profiles like alpine is more confusing than helpful. Also, some early reviewers who aren't steeped in the details of packaging found the word glibc actively misleading, jumping to the conclusion that it meant they needed a system with exactly that glibc version. And tags like manylinux_$VERSION and alpine_$VERSION also have the advantages of parsimony and directness. So we'll go with that.
PEP: 544
Title: Protocols: Structural subtyping (static duck typing)
Author: Ivan Levkivskyi <[email protected]>, Jukka Lehtosalo <[email protected]>, Łukasz Langa <[email protected]>
BDFL-Delegate: Guido van Rossum <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Typing
Created: 05-Mar-2017
Python-Version: 3.8
Resolution: https://mail.python.org/archives/list/[email protected]/message/FDO4KFYWYQEP3U2HVVBEBR3SXPHQSHYR/

Abstract

Type hints introduced in PEP 484 can be used to specify type metadata for static type checkers and other third party tools. However, PEP 484 only specifies the semantics of nominal subtyping. In this PEP we specify static and runtime semantics of protocol classes that will provide support for structural subtyping (static duck typing).

Rationale and Goals

Currently, PEP 484 and the typing module [typing] define abstract base classes for several common Python protocols such as Iterable and Sized. The problem with them is that a class has to be explicitly marked to support them, which is unpythonic and unlike what one would normally do in idiomatic dynamically typed Python code. For example, this conforms to PEP 484:

    from typing import Sized, Iterable, Iterator

    class Bucket(Sized, Iterable[int]):
        ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[int]: ...

The same problem appears with user-defined ABCs: they must be explicitly subclassed or registered. This is particularly difficult to do with library types, as the type objects may be hidden deep in the implementation of the library. Also, extensive use of ABCs might impose additional runtime costs.

The intention of this PEP is to solve all these problems by allowing users to write the above code without explicit base classes in the class definition, allowing Bucket to be implicitly considered a subtype of both Sized and Iterable[int] by static type checkers using structural [wiki-structural] subtyping:

    from typing import Iterator, Iterable

    class Bucket:
        ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[int]: ...

    def collect(items: Iterable[int]) -> int: ...

    result: int = collect(Bucket())  # Passes type check

Note that the ABCs in the typing module already provide structural behavior at runtime: isinstance(Bucket(), Iterable) returns True. The main goal of this proposal is to support such behavior statically. The same functionality will be provided for user-defined protocols, as specified below. The above code with a protocol class matches common Python conventions much better. It is also automatically extensible and works with additional, unrelated classes that happen to implement the required protocol.

Nominal vs structural subtyping

Structural subtyping is natural for Python programmers since it matches the runtime semantics of duck typing: an object that has certain properties is treated independently of its actual runtime class. However, as discussed in PEP 483, both nominal and structural subtyping have their strengths and weaknesses. Therefore, in this PEP we do not propose to replace the nominal subtyping described by PEP 484 with structural subtyping completely. Instead, protocol classes as specified in this PEP complement normal classes, and users are free to choose where to apply a particular solution. See the section on rejected ideas at the end of this PEP for additional motivation.
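To see concretely the runtime behavior referred to in the rationale above (isinstance(Bucket(), Iterable) returning True), here is a self-contained snippet that already runs on current Python, before any of the additions proposed by this PEP:

    from collections.abc import Iterable, Sized

    class Bucket:               # note: no Iterable or Sized base classes
        def __len__(self):
            return 0
        def __iter__(self):
            return iter([])

    # These ABCs already perform structural checks at runtime via
    # __subclasshook__; this PEP proposes the matching *static* behavior.
    assert isinstance(Bucket(), Iterable)
    assert isinstance(Bucket(), Sized)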
Non-goals

At runtime, protocol classes will be simple ABCs. There is no intent to provide sophisticated runtime instance and class checks against protocol classes. This would be difficult and error-prone, and would contradict the logic of PEP 484. As well, following PEP 484 and PEP 526, we state that protocols are completely optional:

- No runtime semantics will be imposed for variables or parameters annotated with a protocol class.
- Any checks will be performed only by third-party type checkers and other tools.
- Programmers are free to not use them even if they use type annotations.
- There is no intent to make protocols non-optional in the future.

To reiterate, providing complex runtime semantics for protocol classes is not a goal of this PEP; the main goal is to provide support and standards for static structural subtyping. The possibility of using protocols in the runtime context as ABCs is rather a minor bonus that exists mostly to provide a seamless transition for projects that already use ABCs.

Existing Approaches to Structural Subtyping

Before describing the actual specification, we review and comment on existing approaches related to structural subtyping in Python and other languages:

- zope.interface [zope-interfaces] was one of the first widely used approaches to structural subtyping in Python. It is implemented by providing special classes to distinguish interface classes from normal classes, to mark interface attributes, and to explicitly declare implementation. For example:

      from zope.interface import Interface, Attribute, implementer

      class IEmployee(Interface):
          name = Attribute("Name of employee")
          def do(work):
              """Do some work"""

      @implementer(IEmployee)
      class Employee:
          name = 'Anonymous'
          def do(self, work):
              return work.start()

  Zope interfaces support various contracts and constraints for interface classes. For example:

      from zope.interface import invariant

      def required_contact(obj):
          if not (obj.email or obj.phone):
              raise Exception("At least one contact info is required")

      class IPerson(Interface):
          name = Attribute("Name")
          email = Attribute("Email Address")
          phone = Attribute("Phone Number")
          invariant(required_contact)

  Even more detailed invariants are supported. However, Zope interfaces rely entirely on runtime validation. Such a focus on runtime properties goes beyond the scope of the current proposal, and static support for invariants might be difficult to implement. However, the idea of marking an interface class with a special base class is reasonable and easy to implement both statically and at runtime.

- Python abstract base classes [abstract-classes] are the standard library tool to provide some functionality similar to structural subtyping. The drawback of this approach is the necessity to either subclass the abstract class or register an implementation explicitly:

      from abc import ABC

      class MyTuple(ABC):
          pass

      MyTuple.register(tuple)

      assert issubclass(tuple, MyTuple)
      assert isinstance((), MyTuple)

  As mentioned in the rationale, we want to avoid such a necessity, especially in the static context. However, in a runtime context, ABCs are good candidates for protocol classes, and they are already used extensively in the typing module.
- Abstract classes defined in the collections.abc module [collections-abc] are slightly more advanced, since they implement a custom __subclasshook__() method that allows runtime structural checks without explicit registration (a minimal sketch of this hook appears after this list):

      from collections.abc import Iterable

      class MyIterable:
          def __iter__(self):
              return iter([])

      assert isinstance(MyIterable(), Iterable)

  Such behavior seems to be a perfect fit for both the runtime and static behavior of protocols. As discussed in the rationale, we propose to add static support for such behavior. In addition, to allow users to achieve such runtime behavior for user-defined protocols, a special @runtime_checkable decorator will be provided; see the detailed discussion below.

- TypeScript [typescript] provides support for user-defined classes and interfaces. Explicit implementation declaration is not required and structural subtyping is verified statically. For example:

      interface LabeledItem {
          label: string;
          size?: number;
      }

      function printLabel(obj: LabeledItem) {
          console.log(obj.label);
      }

      let myObj = {size: 10, label: "Size 10 Object"};
      printLabel(myObj);

  Note that optional interface members are supported. Also, TypeScript prohibits redundant members in implementations. While the idea of optional members looks interesting, it would complicate this proposal, and it is not clear how useful it will be. Therefore, it is proposed to postpone this; see rejected ideas. In general, the idea of static protocol checking without runtime implications looks reasonable, and this proposal basically follows the same line.

- Go [golang] uses a more radical approach and makes interfaces the primary way to provide type information. Also, assignments are used to explicitly ensure implementation:

      type SomeInterface interface {
          SomeMethod() ([]byte, error)
      }

      if _, ok := someval.(SomeInterface); ok {
          fmt.Printf("value implements some interface")
      }

  Both of these ideas are questionable in the context of this proposal. See the section on rejected ideas.
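For reference, the __subclasshook__() mechanism mentioned in the collections.abc item above can be reproduced for a user-defined ABC today. A minimal sketch, with illustrative class names:

    from abc import ABCMeta

    class SupportsDraw(metaclass=ABCMeta):
        @classmethod
        def __subclasshook__(cls, C):
            if cls is SupportsDraw:
                # Structural check: any class whose MRO defines a
                # draw() attribute counts as a subclass.
                return any("draw" in B.__dict__ for B in C.__mro__)
            return NotImplemented

    class Square:
        def draw(self):
            return "square"

    assert issubclass(Square, SupportsDraw)     # no registration needed
    assert isinstance(Square(), SupportsDraw)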
Specification

Terminology

We propose to use the term protocols for types supporting structural subtyping. The reason is that the term iterator protocol, for example, is widely understood in the community, and coming up with a new term for this concept in a statically typed context would just create confusion.

This has the drawback that the term protocol becomes overloaded with two subtly different meanings: the first is the traditional, well-known but slightly fuzzy concept of protocols such as iterator; the second is the more explicitly defined concept of protocols in statically typed code. The distinction is not important most of the time, and in other cases we propose to just add a qualifier such as protocol classes when referring to the static type concept.

If a class includes a protocol in its MRO, the class is called an explicit subclass of the protocol. If a class is a structural subtype of a protocol, it is said to implement the protocol and to be compatible with the protocol. If a class is compatible with a protocol but the protocol is not included in the MRO, the class is an implicit subtype of the protocol. (Note that one can explicitly subclass a protocol and still not implement it, if a protocol attribute is set to None in the subclass; see the Python [data-model] for details.)

The attributes (variables and methods) of a protocol that are mandatory for another class in order to be considered a structural subtype are called protocol members.

Defining a protocol

Protocols are defined by including a special new class typing.Protocol (an instance of abc.ABCMeta) in the base classes list, typically at the end of the list. Here is a simple example:

    from typing import Protocol

    class SupportsClose(Protocol):
        def close(self) -> None:
            ...

Now if one defines a class Resource with a close() method that has a compatible signature, it would implicitly be a subtype of SupportsClose, since structural subtyping is used for protocol types:

    class Resource:
        ...
        def close(self) -> None:
            self.file.close()
            self.lock.release()

Apart from a few restrictions explicitly mentioned below, protocol types can be used in every context where normal types can:

    def close_all(things: Iterable[SupportsClose]) -> None:
        for t in things:
            t.close()

    f = open('foo.txt')
    r = Resource()
    close_all([f, r])  # OK!
    close_all([1])     # Error: 'int' has no 'close' method

Note that both the user-defined class Resource and the built-in IO type (the return type of open()) are considered subtypes of SupportsClose, because they provide a close() method with a compatible type signature.

Protocol members

All methods defined in the protocol class body are protocol members, both normal methods and those decorated with @abstractmethod. If any parameters of a protocol method are not annotated, then their types are assumed to be Any (see PEP 484). Bodies of protocol methods are type checked. An abstract method that should not be called via super() ought to raise NotImplementedError. Example:

    from typing import Protocol
    from abc import abstractmethod

    class Example(Protocol):
        def first(self) -> int:     # This is a protocol member
            return 42

        @abstractmethod
        def second(self) -> int:    # Method without a default implementation
            raise NotImplementedError

Static methods, class methods, and properties are equally allowed in protocols.

To define a protocol variable, one can use PEP 526 variable annotations in the class body. Additional attributes only defined in the body of a method by assignment via self are not allowed. The rationale for this is that the protocol class implementation is often not shared by subtypes, so the interface should not depend on the default implementation. Examples:

    from typing import Protocol, List

    class Template(Protocol):
        name: str        # This is a protocol member
        value: int = 0   # This one too (with default)

        def method(self) -> None:
            self.temp: List[int] = []  # Error in type checker

    class Concrete:
        def __init__(self, name: str, value: int) -> None:
            self.name = name
            self.value = value

        def method(self) -> None:
            return

    var: Template = Concrete('value', 42)  # OK

To distinguish between protocol class variables and protocol instance variables, the special ClassVar annotation should be used as specified by PEP 526. By default, protocol variables as defined above are considered readable and writable. To define a read-only protocol variable, one can use an (abstract) property.
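For instance, a read-only protocol member could look like the following sketch; the names are illustrative, and a type checker would accept any class exposing a compatible name property:

    from typing import Protocol

    class Named(Protocol):
        @property
        def name(self) -> str: ...

    class User:
        def __init__(self, name: str) -> None:
            self._name = name

        @property
        def name(self) -> str:    # read-only: no setter is defined
            return self._name

    def display(obj: Named) -> str:
        return obj.name.title()   # may read obj.name, but not assign to it

    print(display(User("guido")))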
Explicitly declaring implementation

To explicitly declare that a certain class implements a given protocol, it can be used as a regular base class. In this case the class can use default implementations of protocol members. Static analysis tools are expected to automatically detect that a class implements a given protocol, so while it's possible to subclass a protocol explicitly, it's not necessary to do so for the sake of type-checking.

The default implementations cannot be used if the subtype relationship is implicit and only via structural subtyping – the semantics of inheritance is not changed. Examples:

    class PColor(Protocol):
        @abstractmethod
        def draw(self) -> str:
            ...
        def complex_method(self) -> int:
            # some complex code here
            ...

    class NiceColor(PColor):
        def draw(self) -> str:
            return "deep blue"

    class BadColor(PColor):
        def draw(self) -> str:
            return super().draw()  # Error, no default implementation

    class ImplicitColor:           # Note no 'PColor' base here
        def draw(self) -> str:
            return "probably gray"
        def complex_method(self) -> int:
            # class needs to implement this
            ...

    nice: NiceColor
    another: ImplicitColor

    def represent(c: PColor) -> None:
        print(c.draw(), c.complex_method())

    represent(nice)     # OK
    represent(another)  # Also OK

Note that there is little difference between explicit and implicit subtypes; the main benefit of explicit subclassing is to get some protocol methods "for free". In addition, type checkers can statically verify that the class actually implements the protocol correctly:

    class RGB(Protocol):
        rgb: Tuple[int, int, int]

        @abstractmethod
        def intensity(self) -> int:
            return 0

    class Point(RGB):
        def __init__(self, red: int, green: int, blue: str) -> None:
            self.rgb = red, green, blue  # Error, 'blue' must be 'int'

        # Type checker might warn that 'intensity' is not defined

A class can explicitly inherit from multiple protocols and also from normal classes. In this case methods are resolved using the normal MRO, and a type checker verifies that all subtyping relationships are correct. The semantics of @abstractmethod is not changed: all abstract methods must be implemented by an explicit subclass before it can be instantiated.

Merging and extending protocols

The general philosophy is that protocols are mostly like regular ABCs, but a static type checker will handle them specially. Subclassing a protocol class would not turn the subclass into a protocol unless it also has typing.Protocol as an explicit base class. Without this base, the class is "downgraded" to a regular ABC that cannot be used with structural subtyping. The rationale for this rule is that we don't want to accidentally have some class act as a protocol just because one of its base classes happens to be one. We still slightly prefer nominal subtyping over structural subtyping in the static typing world.

A subprotocol can be defined by having one or more protocols as immediate base classes and also having typing.Protocol as an immediate base class:

    from typing import Sized, Protocol

    class SizedAndClosable(Sized, Protocol):
        def close(self) -> None:
            ...

Now the protocol SizedAndClosable is a protocol with two methods, __len__ and close. If one omits Protocol in the base class list, this would be a regular (non-protocol) class that must implement Sized. Alternatively, one can implement the SizedAndClosable protocol by merging the SupportsClose protocol from the example in the definition section with typing.Sized:

    from typing import Sized

    class SupportsClose(Protocol):
        def close(self) -> None:
            ...

    class SizedAndClosable(Sized, SupportsClose, Protocol):
        pass

The two definitions of SizedAndClosable are equivalent. Subclass relationships between protocols are not meaningful when considering subtyping, since structural compatibility is the criterion, not the MRO.

If Protocol is included in the base class list, all the other base classes must be protocols. A protocol can't extend a regular class; see rejected ideas for the rationale. Note that rules around explicit subclassing are different from regular ABCs, where abstractness is simply defined by having at least one abstract method unimplemented. Protocol classes must be marked explicitly.
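A compact restatement of the explicit-marking rule in code form; this is a sketch, and the derived class names are invented for illustration:

    from typing import Protocol

    class SupportsClose(Protocol):
        def close(self) -> None: ...

    # Still a protocol: typing.Protocol appears again in the base list.
    class SupportsCloseAndFlush(SupportsClose, Protocol):
        def flush(self) -> None: ...

    # NOT a protocol: with no explicit Protocol base, this is an ordinary
    # ABC, so structural subtyping no longer applies to it.
    class FileLike(SupportsClose):
        def close(self) -> None:
            print("closed")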
Generic protocols

Generic protocols are important. For example, SupportsAbs, Iterable and Iterator are generic protocols. They are defined similarly to normal non-protocol generic types:

    class Iterable(Protocol[T]):
        @abstractmethod
        def __iter__(self) -> Iterator[T]:
            ...

Protocol[T, S, ...] is allowed as a shorthand for Protocol, Generic[T, S, ...].

User-defined generic protocols support explicitly declared variance. Type checkers will warn if the inferred variance is different from the declared variance. Examples:

    T = TypeVar('T')
    T_co = TypeVar('T_co', covariant=True)
    T_contra = TypeVar('T_contra', contravariant=True)

    class Box(Protocol[T_co]):
        def content(self) -> T_co:
            ...

    box: Box[float]
    second_box: Box[int]
    box = second_box  # This is OK due to the covariance of 'Box'.

    class Sender(Protocol[T_contra]):
        def send(self, data: T_contra) -> int:
            ...

    sender: Sender[float]
    new_sender: Sender[int]
    new_sender = sender  # OK, 'Sender' is contravariant.

    class Proto(Protocol[T]):
        attr: T  # this class is invariant, since it has a mutable attribute

    var: Proto[float]
    another_var: Proto[int]
    var = another_var  # Error! 'Proto[float]' is incompatible with 'Proto[int]'.

Note that unlike nominal classes, de facto covariant protocols cannot be declared as invariant, since this can break transitivity of subtyping (see rejected ideas for details). For example:

    T = TypeVar('T')

    class AnotherBox(Protocol[T]):  # Error, this protocol is covariant in T,
        def content(self) -> T:     # not invariant.
            ...

Recursive protocols

Recursive protocols are also supported. Forward references to the protocol class names can be given as strings, as specified by PEP 484. Recursive protocols are useful for representing self-referential data structures like trees in an abstract fashion:

    class Traversable(Protocol):
        def leaves(self) -> Iterable['Traversable']:
            ...

Note that for recursive protocols, a class is considered a subtype of the protocol in situations where the decision depends on itself. Continuing the previous example:

    class SimpleTree:
        def leaves(self) -> List['SimpleTree']:
            ...

    root: Traversable = SimpleTree()  # OK

    class Tree(Generic[T]):
        def leaves(self) -> List['Tree[T]']:
            ...

    def walk(graph: Traversable) -> None:
        ...

    tree: Tree[float] = Tree()
    walk(tree)  # OK, 'Tree[float]' is a subtype of 'Traversable'

Self-types in protocols

The self-types in protocols follow the corresponding specification of PEP 484 (annotating instance and class methods). For example:

    C = TypeVar('C', bound='Copyable')

    class Copyable(Protocol):
        def copy(self: C) -> C:
            ...

    class One:
        def copy(self) -> 'One':
            ...

    T = TypeVar('T', bound='Other')

    class Other:
        def copy(self: T) -> T:
            ...

    c: Copyable
    c = One()    # OK
    c = Other()  # Also OK

Callback protocols

Protocols can be used to define flexible callback types that are hard (or even impossible) to express using the Callable[...] syntax specified by PEP 484, such as variadic, overloaded, and complex generic callbacks. They can be defined as protocols with a __call__ member:

    from typing import Optional, List, Protocol

    class Combiner(Protocol):
        def __call__(self, *vals: bytes, maxlen: Optional[int] = None) -> List[bytes]:
            ...

    def good_cb(*vals: bytes, maxlen: Optional[int] = None) -> List[bytes]:
        ...

    def bad_cb(*vals: bytes, maxitems: Optional[int]) -> List[bytes]:
        ...

    comb: Combiner = good_cb  # OK
    comb = bad_cb             # Error! Argument 2 has incompatible type because of
                              # different name and kind in the callback

Callback protocols and Callable[...] types can be used interchangeably.
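A runnable elaboration of the Combiner example above, filling in a trivial conforming implementation (the body of batch is invented purely for demonstration):

    from typing import List, Optional, Protocol

    class Combiner(Protocol):
        def __call__(self, *vals: bytes,
                     maxlen: Optional[int] = None) -> List[bytes]: ...

    def batch(*vals: bytes, maxlen: Optional[int] = None) -> List[bytes]:
        # A minimal implementation whose signature matches the protocol:
        # same parameter kinds, names, and annotations.
        out = list(vals)
        return out[:maxlen] if maxlen is not None else out

    combiner: Combiner = batch             # accepted by a type checker
    print(combiner(b"a", b"b", maxlen=1))  # [b'a']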
Using Protocols

Subtyping relationships with other types

Protocols cannot be instantiated, so there are no values whose runtime type is a protocol. For variables and parameters with protocol types, subtyping relationships are subject to the following rules:

- A protocol is never a subtype of a concrete type.
- A concrete type X is a subtype of protocol P if and only if X implements all protocol members of P with compatible types. In other words, subtyping with respect to a protocol is always structural.
- A protocol P1 is a subtype of another protocol P2 if P1 defines all protocol members of P2 with compatible types.

Generic protocol types follow the same rules of variance as non-protocol types. Protocol types can be used in all contexts where any other types can be used, such as in Union, ClassVar, type variable bounds, etc. Generic protocols follow the rules for generic abstract classes, except for using structural compatibility instead of compatibility defined by inheritance relationships.

Static type checkers will recognize protocol implementations, even if the corresponding protocols are not imported:

    # file lib.py
    from typing import Sized

    T = TypeVar('T', contravariant=True)

    class ListLike(Sized, Protocol[T]):
        def append(self, x: T) -> None:
            pass

    def populate(lst: ListLike[int]) -> None:
        ...

    # file main.py
    from lib import populate  # Note that ListLike is NOT imported

    class MockStack:
        def __len__(self) -> int:
            return 42
        def append(self, x: int) -> None:
            print(x)

    populate([1, 2, 3])    # Passes type check
    populate(MockStack())  # Also OK

Unions and intersections of protocols

A Union of protocol classes behaves the same way as for non-protocol classes. For example:

    from typing import Union, Optional, Protocol

    class Exitable(Protocol):
        def exit(self) -> int:
            ...

    class Quittable(Protocol):
        def quit(self) -> Optional[int]:
            ...

    def finish(task: Union[Exitable, Quittable]) -> int:
        ...

    class DefaultJob:
        ...
        def quit(self) -> int:
            return 0

    finish(DefaultJob())  # OK

One can use multiple inheritance to define an intersection of protocols. Example:

    from typing import Iterable, Hashable

    class HashableFloats(Iterable[float], Hashable, Protocol):
        pass

    def cached_func(args: HashableFloats) -> float:
        ...

    cached_func((1, 2, 3))  # OK, tuple is both hashable and iterable

If this proves to be a widely used scenario, then a special intersection type construct could be added in the future, as specified by PEP 483; see rejected ideas for more details.

Type[] and class objects vs protocols

Variables and parameters annotated with Type[Proto] accept only concrete (non-protocol) subtypes of Proto. The main reason for this is to allow instantiation of parameters with such a type. For example:

    class Proto(Protocol):
        @abstractmethod
        def meth(self) -> int:
            ...

    class Concrete:
        def meth(self) -> int:
            return 42

    def fun(cls: Type[Proto]) -> int:
        return cls().meth()  # OK

    fun(Proto)     # Error
    fun(Concrete)  # OK

The same rule applies to variables:

    var: Type[Proto]
    var = Proto     # Error
    var = Concrete  # OK
    var().meth()    # OK

Assigning an ABC or a protocol class to a variable is allowed if it is not explicitly typed, and such an assignment creates a type alias. For normal (non-abstract) classes, the behavior of Type[] is not changed.

A class object is considered an implementation of a protocol if accessing all members on it results in types compatible with the protocol members.
For example:

    from typing import Any, Protocol

    class ProtoA(Protocol):
        def meth(self, x: int) -> int:
            ...

    class ProtoB(Protocol):
        def meth(self, obj: Any, x: int) -> int:
            ...

    class C:
        def meth(self, x: int) -> int:
            ...

    a: ProtoA = C  # Type check error, signatures don't match!
    b: ProtoB = C  # OK

NewType() and type aliases

Protocols are essentially anonymous. To emphasize this point, static type checkers might refuse protocol classes inside NewType() to avoid an illusion that a distinct type is provided:

    from typing import NewType, Protocol, Iterator

    class Id(Protocol):
        code: int
        secrets: Iterator[bytes]

    UserId = NewType('UserId', Id)  # Error, can't provide distinct type

In contrast, type aliases are fully supported, including generic type aliases:

    from typing import TypeVar, Reversible, Iterable, Sized

    T = TypeVar('T')

    class SizedIterable(Iterable[T], Sized, Protocol):
        pass

    CompatReversible = Union[Reversible[T], SizedIterable[T]]

Modules as implementations of protocols

A module object is accepted where a protocol is expected, if the public interface of the given module is compatible with the expected protocol. For example:

    # file default_config.py
    timeout = 100
    one_flag = True
    other_flag = False

    # file main.py
    import default_config
    from typing import Protocol

    class Options(Protocol):
        timeout: int
        one_flag: bool
        other_flag: bool

    def setup(options: Options) -> None:
        ...

    setup(default_config)  # OK

To determine compatibility of module-level functions, the self argument of the corresponding protocol methods is dropped. For example:

    # callbacks.py
    def on_error(x: int) -> None:
        ...
    def on_success() -> None:
        ...

    # main.py
    import callbacks
    from typing import Protocol

    class Reporter(Protocol):
        def on_error(self, x: int) -> None:
            ...
        def on_success(self) -> None:
            ...

    rp: Reporter = callbacks  # Passes type check

@runtime_checkable decorator and narrowing types by isinstance()

The default semantics is that isinstance() and issubclass() fail for protocol types. This is in the spirit of duck typing – protocols basically would be used to model duck typing statically, not explicitly at runtime. However, it should be possible for protocol types to implement custom instance and class checks when this makes sense, similar to how Iterable and other ABCs in collections.abc and typing already do it, but this is limited to non-generic and unsubscripted generic protocols (Iterable is statically equivalent to Iterable[Any]). The typing module will define a special @runtime_checkable class decorator that provides the same semantics for class and instance checks as for collections.abc classes, essentially making them "runtime protocols":

    from typing import runtime_checkable, Protocol

    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self):
            ...

    assert isinstance(open('some/file'), SupportsClose)

Note that instance checks are not 100% reliable statically; this is why this behavior is opt-in, see the section on rejected ideas for examples. The most type checkers can do is to treat isinstance(obj, Iterator) roughly as a simpler way to write hasattr(obj, '__iter__') and hasattr(obj, '__next__').
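A runnable sketch of the narrowing use case enabled by this decorator; the names are illustrative, and typing.Protocol requires Python 3.8+ (or the typing_extensions backport):

    import io
    from typing import Protocol, Union, runtime_checkable

    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self) -> None: ...

    def finalize(resource: Union[SupportsClose, int]) -> None:
        if isinstance(resource, SupportsClose):
            # A type checker narrows 'resource' to SupportsClose here.
            resource.close()
        else:
            print("nothing to close:", resource)

    finalize(io.StringIO())  # has close(): passes the structural check
    finalize(42)             # int has no close(): takes the else branch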
To minimize the risks for this feature, the following rules are applied.

Definitions:

- Data, and non-data protocols: A protocol is called a non-data protocol if it only contains methods as members (for example Sized, Iterator, etc). A protocol that contains at least one non-method member (like x: int) is called a data protocol.
- Unsafe overlap: A type X is called unsafely overlapping with a protocol P if X is not a subtype of P, but it is a subtype of the type-erased version of P where all members have type Any. In addition, if at least one element of a union unsafely overlaps with a protocol P, then the whole union is unsafely overlapping with P.

Specification:

- A protocol can be used as a second argument in isinstance() and issubclass() only if it is explicitly opted in via the @runtime_checkable decorator. This requirement exists because protocol checks are not type safe in the case of dynamically set attributes, and because type checkers can only prove that an isinstance() check is safe for a given class, not for all its subclasses.
- isinstance() can be used with both data and non-data protocols, while issubclass() can be used only with non-data protocols. This restriction exists because some data attributes can be set on an instance in the constructor, and this information is not always available on the class object.
- Type checkers should reject an isinstance() or issubclass() call if there is an unsafe overlap between the type of the first argument and the protocol.
- Type checkers should be able to select a correct element from a union after a safe isinstance() or issubclass() call. For narrowing from non-union types, type checkers can use their best judgement (this is intentionally unspecified, since a precise specification would require intersection types).
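The data/non-data distinction above is observable at runtime in the eventual typing implementation. A sketch of the behavior as implemented in CPython 3.8+ (class names invented for the example):

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class NonData(Protocol):      # methods only: a non-data protocol
        def meth(self) -> int: ...

    @runtime_checkable
    class Data(Protocol):         # has a non-method member: a data protocol
        x: int

    class C:
        x = 0
        def meth(self) -> int:
            return 0

    assert issubclass(C, NonData)  # allowed for non-data protocols
    assert isinstance(C(), Data)   # isinstance works for both kinds
    try:
        issubclass(C, Data)        # rejected for data protocols
    except TypeError as exc:
        print(exc)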
Using Protocols in Python 2.7 - 3.5

Variable annotation syntax was added in Python 3.6, so the syntax for defining protocol variables proposed in the specification section can't be used if support for earlier versions is needed. To define these in a manner compatible with older versions of Python, one can use properties. Properties can be settable and/or abstract if needed:

    class Foo(Protocol):
        @property
        def c(self) -> int:
            return 42   # Default value can be provided for property...

        @abstractproperty
        def d(self) -> int:
            return 0    # ... or it can be abstract

Also, function type comments can be used as per PEP 484 (for example, to provide compatibility with Python 2). The typing module changes proposed in this PEP will also be backported to earlier versions via the backport currently available on PyPI.

Runtime Implementation of Protocol Classes

Implementation details

The runtime implementation could be done in pure Python without any effects on the core interpreter and standard library except in the typing module, and a minor update to collections.abc:

- Define class typing.Protocol similar to typing.Generic.
- Implement functionality to detect whether a class is a protocol or not. Add a class attribute _is_protocol = True if that is the case. Verify that a protocol class only has protocol base classes in the MRO (except for object).
- Implement @runtime_checkable that allows __subclasshook__() to perform structural instance and subclass checks, as in collections.abc classes.
- All structural subtyping checks will be performed by static type checkers, such as mypy [mypy]. No additional support for protocol validation will be provided at runtime.

Changes in the typing module

The following classes in the typing module will be protocols:

- Callable
- Awaitable
- Iterable, Iterator
- AsyncIterable, AsyncIterator
- Hashable
- Sized
- Container
- Collection
- Reversible
- ContextManager, AsyncContextManager
- SupportsAbs (and other Supports* classes)

Most of these classes are small and conceptually simple. It is easy to see which methods these protocols implement, and to immediately recognize the corresponding runtime protocol counterpart. Practically, few changes will be needed in typing, since some of these classes already behave the necessary way at runtime. Most of these will need to be updated only in the corresponding typeshed stubs [typeshed].

All other concrete generic classes, such as List, Set, IO, Deque, etc., are sufficiently complex that it makes sense to keep them non-protocols (i.e. require code to be explicit about them). Also, it is too easy to leave some methods unimplemented by accident, and explicitly marking the subclass relationship allows type checkers to pinpoint the missing implementations.

Introspection

The existing class introspection machinery (dir, __annotations__, etc.) can be used with protocols. In addition, all introspection tools implemented in the typing module will support protocols. Since all attributes need to be defined in the class body based on this proposal, protocol classes will have an even better perspective for introspection than regular classes, where attributes can be defined implicitly – protocol attributes can't be initialized in ways that are not visible to introspection (using setattr(), assignment via self, etc.). Still, some things like types of attributes will not be visible at runtime in Python 3.5 and earlier, but this looks like a reasonable limitation.

There will be only limited support of isinstance() and issubclass() as discussed above (these will always fail with TypeError for subscripted generic protocols, since a reliable answer could not be given at runtime in this case). But together with other introspection tools this gives a reasonable perspective for runtime type checking tools.
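A small sketch of what that introspection looks like under the implementation described above; note that _is_protocol is the internal marker mentioned in the implementation details, so real code should not rely on it:

    from typing import Protocol, get_type_hints

    class Template(Protocol):
        name: str
        value: int = 0
        def method(self) -> None: ...

    print(Template._is_protocol)     # True: the internal protocol marker
    print(get_type_hints(Template))  # {'name': <class 'str'>, 'value': <class 'int'>}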
Rejected/Postponed Ideas

The ideas in this section were previously discussed in [several] [discussions] [elsewhere].

Make every class a protocol by default

Some languages such as Go make structural subtyping the only or the primary form of subtyping. We could achieve a similar result by making all classes protocols by default (or even always). However, we believe that it is better to require classes to be explicitly marked as protocols, for the following reasons:

- Protocols don't have some properties of regular classes. In particular, isinstance(), as defined for normal classes, is based on the nominal hierarchy. Making everything a protocol by default while keeping isinstance() working would require changing its semantics, which won't happen.
- Protocol classes should generally not have many method implementations, as they describe an interface, not an implementation. Most classes have many method implementations, making them bad protocol classes.
- Experience suggests that many classes are not practical as protocols anyway, mainly because their interfaces are too large, complex or implementation-oriented (for example, they may include de facto private attributes and methods without a __ prefix).
- Most actually useful protocols in existing Python code seem to be implicit. The ABCs in typing and collections.abc are rather an exception, but even they are recent additions to Python and most programmers do not use them yet.
- Many built-in functions only accept concrete instances of int (and subclass instances), and similarly for other built-in classes. Making int a structural type wouldn't be safe without major changes to the Python runtime, which won't happen.

Protocols subclassing normal classes

The main rationale to prohibit this is to preserve transitivity of subtyping; consider this example:

    from typing import Protocol

    class Base:
        attr: str

    class Proto(Base, Protocol):
        def meth(self) -> int:
            ...

    class C:
        attr: str
        def meth(self) -> int:
            return 0

Now, C is a subtype of Proto, and Proto is a subtype of Base. But C cannot be a subtype of Base (since the latter is not a protocol). This situation would be really weird. In addition, there is an ambiguity about whether attributes of Base should become protocol members of Proto.

Support optional protocol members

We can come up with examples where it would be handy to be able to say that a method or data attribute does not need to be present in a class implementing a protocol, but if it is present, it must conform to a specific signature or type. One could use a hasattr() check to determine whether they can use the attribute on a particular instance. Languages such as TypeScript have similar features, and apparently they are pretty commonly used. The current realistic potential use cases for protocols in Python don't require these. In the interest of simplicity, we propose to not support optional methods or attributes. We can always revisit this later if there is an actual need.

Allow only protocol methods and force use of getters and setters

One could argue that protocols typically only define methods, but not variables. However, using getters and setters in cases where only a simple variable is needed would be quite unpythonic. Moreover, the widespread use of properties (that often act as type validators) in large code bases is partially due to the previous absence of static type checkers for Python, a problem that PEP 484 and this PEP are aiming to solve. For example:

    # without static types

    class MyClass:
        @property
        def my_attr(self):
            return self._my_attr

        @my_attr.setter
        def my_attr(self, value):
            if not isinstance(value, int):
                raise ValidationError("An integer expected for my_attr")
            self._my_attr = value

    # with static types

    class MyClass:
        my_attr: int

Support non-protocol members

There was an idea to make some methods "non-protocol" (i.e. not necessary to implement, and inherited in explicit subclassing), but it was rejected, since this complicates things. For example, consider this situation:

    class Proto(Protocol):
        @abstractmethod
        def first(self) -> int:
            raise NotImplementedError

        def second(self) -> int:
            return self.first() + 1

    def fun(arg: Proto) -> None:
        arg.second()

The question is, should this be an error? We think most people would expect this to be valid. Therefore, to be on the safe side, we need to require both methods to be implemented in implicit subclasses. In addition, if one looks at the definitions in collections.abc, there are very few methods that could be considered "non-protocol". Therefore, it was decided to not introduce "non-protocol" methods.

There is only one downside to this: it will require some boilerplate for implicit subtypes of "large" protocols. But this doesn't apply to "built-in" protocols, which are all "small" (i.e. have only a few abstract methods). Also, such a style is discouraged for user-defined protocols. It is recommended to create compact protocols and combine them.

Make protocols interoperable with other approaches

The protocols as described here are basically a minimal extension to the existing concept of ABCs. We argue that this is the way they should be understood, instead of as something that replaces Zope interfaces, for example.
Attempting such interoperability would significantly complicate both the concept and the implementation. On the other hand, Zope interfaces are conceptually a superset of the protocols defined here, but they use an incompatible syntax to define them, because before PEP 526 there was no straightforward way to annotate attributes. In the 3.6+ world, zope.interface might potentially adopt the Protocol syntax. In this case, type checkers could be taught to recognize interfaces as protocols and make simple structural checks with respect to them.

Use assignments to check explicitly that a class implements a protocol

In the Go language the explicit checks for implementation are performed via dummy assignments [golang]. Such a check is also possible with the current proposal. Example:

    class A:
        def __len__(self) -> float:
            return ...

    _: Sized = A()  # Error: A.__len__ doesn't conform to 'Sized'
                    # (Incompatible return type 'float')

This approach moves the check away from the class definition, and it almost requires a comment, as otherwise the code probably would not make any sense to an average reader -- it looks like dead code. Besides, in the simplest form it requires one to construct an instance of A, which could be problematic if this requires accessing or allocating some resources such as files or sockets. We could work around the latter by using a cast, for example, but then the code would be ugly. Therefore, we discourage the use of this pattern.

Support isinstance() checks by default

The problem with this is that instance checks could be unreliable, except for situations where there is a common signature convention such as Iterable. For example:

    class P(Protocol):
        def common_method_name(self, x: int) -> int: ...

    class X:
        <a bunch of methods>
        def common_method_name(self) -> None: ...  # Note different signature

    def do_stuff(o: Union[P, X]) -> int:
        if isinstance(o, P):
            return o.common_method_name(1)  # Results in TypeError not caught
                                            # statically if o is an X instance.

Another potentially problematic case is assignment of attributes after instantiation:

    class P(Protocol):
        x: int

    class C:
        def initialize(self) -> None:
            self.x = 0

    c = C()
    isinstance(c, P)  # False
    c.initialize()
    isinstance(c, P)  # True

    def f(x: Union[P, int]) -> None:
        if isinstance(x, P):
            # Static type of x is P here.
            ...
        else:
            # Static type of x is int, but can be other type at runtime...
            print(x + 1)

    f(C())  # ...causing a TypeError.

We argue that requiring an explicit class decorator would be better, since one can then attach warnings about problems like this in the documentation. The user would be able to evaluate whether the benefits outweigh the potential for confusion for each protocol and explicitly opt in -- but the default behavior would be safer. Finally, it will be easy to make this behavior the default if necessary, while it might be problematic to make it opt-in after being the default.

Provide a special intersection type construct

There was an idea to allow Proto = All[Proto1, Proto2, ...] as a shorthand for:

    class Proto(Proto1, Proto2, ..., Protocol):
        pass

However, it is not yet clear how popular/useful it will be, and implementing this in type checkers for non-protocol classes could be difficult. Finally, it will be very easy to add this later if needed.

Prohibit explicit subclassing of protocols by non-protocols

This was rejected for the following reasons:

- Backward compatibility: People are already using ABCs, including generic ABCs from the typing module. If we prohibit explicit subclassing of these ABCs, then quite a lot of code will break.
- Convenience: There are existing protocol-like ABCs (that may be turned into protocols) that have many useful "mix-in" (non-abstract) methods. For example, in the case of Sequence one only needs to implement __getitem__ and __len__ in an explicit subclass, and one gets __iter__, __contains__, __reversed__, index, and count for free.
- Explicit subclassing makes it explicit that a class implements a particular protocol, making subtyping relationships easier to see.
- Type checkers can warn about missing protocol members or members with incompatible types more easily, without having to use hacks like the dummy assignments discussed above in this section.
- Explicit subclassing makes it possible to force a class to be considered a subtype of a protocol (by using # type: ignore together with an explicit base class) when it is not strictly compatible, such as when it has an unsafe override.

Covariant subtyping of mutable attributes

Rejected because covariant subtyping of mutable attributes is not safe. Consider this example:

    class P(Protocol):
        x: float

    def f(arg: P) -> None:
        arg.x = 0.42

    class C:
        x: int

    c = C()
    f(c)      # Would typecheck if covariant subtyping
              # of mutable attributes were allowed.
    c.x >> 1  # But this fails at runtime

It was initially proposed to allow this for practical reasons, but it was subsequently rejected, since this may mask some hard-to-spot bugs.

Overriding inferred variance of protocol classes

It was proposed to allow declaring protocols as invariant if they are actually covariant or contravariant (as is possible for nominal classes, see PEP 484). However, it was decided not to do this because of several downsides:

- Declared protocol invariance breaks transitivity of subtyping. Consider this situation:

      T = TypeVar('T')

      class P(Protocol[T]):  # Protocol is declared as invariant.
          def meth(self) -> T: ...

      class C:
          def meth(self) -> float: ...

      class D(C):
          def meth(self) -> int: ...

  Now we have that D is a subtype of C, and C is a subtype of P[float]. But D is not a subtype of P[float], since D implements P[int], and P is invariant. There is a possibility to "cure" this by looking for protocol implementations in MROs, but this would be too complex in the general case, and this "cure" requires abandoning the simple idea of purely structural subtyping for protocols.

- Subtyping checks will always require type inference for protocols. In the above example a user may complain: "Why did you infer P[int] for my D? It implements P[float]!". Normally, inference can be overruled by an explicit annotation, but here this would require explicit subclassing, defeating the purpose of using protocols.
- Allowing variance to be overridden would make it impossible for type checkers to give more detailed error messages citing particular conflicts in member type signatures.
- Finally, explicit is better than implicit in this case. Requiring the user to declare the correct variance will simplify understanding the code and will avoid unexpected errors at the point of use.

Support adapters and adaptation

Adaptation was proposed by PEP 246 (rejected) and is supported by zope.interface; see the Zope documentation on adapter registries. Adaptation is quite an advanced concept, and PEP 484 supports unions and generic aliases that can be used instead of adapters. This can be illustrated with the example of the Iterable protocol: there is another way of supporting iteration, by providing __getitem__ and __len__.
If a function supports both this way and the now-standard __iter__ method, then it could be annotated by a union type:

    class OldIterable(Sized, Protocol[T]):
        def __getitem__(self, item: int) -> T: ...

    CompatIterable = Union[Iterable[T], OldIterable[T]]

    class A:
        def __iter__(self) -> Iterator[str]: ...

    class B:
        def __len__(self) -> int: ...
        def __getitem__(self, item: int) -> str: ...

    def iterate(it: CompatIterable[str]) -> None:
        ...

    iterate(A())  # OK
    iterate(B())  # OK

Since there is a reasonable alternative for such cases with existing tooling, it is proposed not to include adaptation in this PEP.

Call structural base types "interfaces"

"Protocol" is a term already widely used in Python to describe duck typing contracts such as the iterator protocol (providing __iter__ and __next__), and the descriptor protocol (providing __get__, __set__, and __delete__). In addition to this and the other reasons given in the specification, protocols are different from Java interfaces in several aspects: protocols don't require an explicit declaration of implementation (they are mainly oriented towards duck typing), and protocols can have default implementations of members and store state.

Make protocols special objects at runtime rather than normal ABCs

Making protocols non-ABCs would make backwards compatibility problematic, if possible at all. For example, collections.abc.Iterable is already an ABC, and lots of existing code uses patterns like isinstance(obj, collections.abc.Iterable) and similar checks with other ABCs (also in a structural manner, i.e., via __subclasshook__). Disabling this behavior would cause breakage. If we keep this behavior for ABCs in collections.abc but do not provide similar runtime behavior for protocols in typing, then a smooth transition to protocols will not be possible. In addition, having two parallel hierarchies may cause confusion.

Backwards Compatibility

This PEP is fully backwards compatible.

Implementation

The mypy type checker fully supports protocols (modulo a few known bugs). This includes treating all the built-in protocols, such as Iterable, structurally. The runtime implementation of protocols is available in the typing_extensions module on PyPI.

References

- abstract-classes: https://docs.python.org/3/library/abc.html
- collections-abc: https://docs.python.org/3/library/collections.abc.html
- data-model: https://docs.python.org/3/reference/datamodel.html#special-method-names
- discussions: https://github.com/python/typing/issues/11
- elsewhere: https://github.com/python/peps/pull/224
- golang: https://golang.org/doc/effective_go.html#interfaces_and_types
- mypy: http://github.com/python/mypy/
- several: https://mail.python.org/pipermail/python-ideas/2015-September/thread.html#35859
- typescript: https://www.typescriptlang.org/docs/handbook/interfaces.html
- typeshed: https://github.com/python/typeshed/
- typing: https://docs.python.org/3/library/typing.html
- wiki-structural: https://en.wikipedia.org/wiki/Structural_type_system
- zope-interfaces: https://zopeinterface.readthedocs.io/en/latest/

Copyright

This document has been placed in the public domain.
PEP: 354
Title: Enumerations in Python
Version: $Revision$
Last-Modified: $Date$
Author: Ben Finney <[email protected]>
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 20-Dec-2005
Python-Version: 2.6
Post-History: 20-Dec-2005
Superseded-By: 435

Rejection Notice

This PEP has been rejected. This doesn't slot nicely into any of the existing modules (like collections), and the Python standard library eschews having lots of individual data structures in their own modules. Also, the PEP has generated no widespread interest. For those who need enumerations, there are cookbook recipes and PyPI packages that meet these needs.

Note: this PEP was superseded by PEP 435, which was accepted in May 2013.

Abstract

This PEP specifies an enumeration data type for Python. An enumeration is an exclusive set of symbolic names bound to arbitrary unique values. Values within an enumeration can be iterated and compared, but the values have no inherent relationship to values outside the enumeration.

Motivation

The properties of an enumeration are useful for defining an immutable, related set of constant values that have a defined sequence but no inherent semantic meaning. Classic examples are days of the week (Sunday through Saturday) and school assessment grades ('A' through 'D', and 'F'). Other examples include error status values and states within a defined process.

It is possible to simply define a sequence of values of some other basic type, such as int or str, to represent discrete arbitrary values. However, an enumeration ensures that such values are distinct from any others, and that operations without meaning ("Wednesday times two") are not defined for these values.

Specification

An enumerated type is created from a sequence of arguments to the type's constructor:

    >>> Weekdays = enum('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')
    >>> Grades = enum('A', 'B', 'C', 'D', 'F')

Enumerations with no values are meaningless. The exception EnumEmptyError is raised if the constructor is called with no value arguments.

The values are bound to attributes of the new enumeration object:

    >>> today = Weekdays.mon

The values can be compared:

    >>> if today == Weekdays.fri:
    ...     print "Get ready for the weekend"

Values within an enumeration cannot be meaningfully compared except with values from the same enumeration.
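To make the specification concrete, here is a rough, hypothetical sketch of such an enum constructor (equality only, for brevity; Python 3 syntax is used although the PEP targets Python 2.6, and only the names enum and EnumEmptyError come from the specification -- everything else is illustrative):

    class EnumEmptyError(ValueError):
        """Raised when an enumeration is created with no values."""

    class _EnumValue:
        def __init__(self, enum_id, key, index):
            self._enum_id, self._key, self.index = enum_id, key, index

        def __str__(self):
            return self._key

        def __eq__(self, other):
            if isinstance(other, _EnumValue) and other._enum_id == self._enum_id:
                return self.index == other.index
            return NotImplemented   # not from the same enumeration

        def __hash__(self):
            return hash((self._enum_id, self._key))

    class enum:
        def __init__(self, *keys):
            if not keys:
                raise EnumEmptyError("enumerations need at least one value")
            self._values = [_EnumValue(id(self), k, i)
                            for i, k in enumerate(keys)]
            for value in self._values:
                setattr(self, str(value), value)

        def __iter__(self):
            return iter(self._values)

    Weekdays = enum('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')
    assert str(Weekdays.wed) == 'wed' and Weekdays.wed.index == 3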
The comparison operation functions return NotImplemented[1] when a value from an enumeration is compared against any value not from the same enumeration or of a different type:

    >>> gym_night = Weekdays.wed
    >>> gym_night.__cmp__(Weekdays.mon)
    1
    >>> gym_night.__cmp__(Weekdays.wed)
    0
    >>> gym_night.__cmp__(Weekdays.fri)
    -1
    >>> gym_night.__cmp__(23)
    NotImplemented
    >>> gym_night.__cmp__("wed")
    NotImplemented
    >>> gym_night.__cmp__(Grades.B)
    NotImplemented

This allows the operation to succeed, evaluating to a boolean value:

    >>> gym_night = Weekdays.wed
    >>> gym_night < Weekdays.mon
    False
    >>> gym_night < Weekdays.wed
    False
    >>> gym_night < Weekdays.fri
    True
    >>> gym_night < 23
    False
    >>> gym_night > 23
    True
    >>> gym_night > "wed"
    True
    >>> gym_night > Grades.B
    True

Coercing a value from an enumeration to a str results in the string that was specified for that value when constructing the enumeration:

    >>> gym_night = Weekdays.wed
    >>> str(gym_night)
    'wed'

The sequence index of each value from an enumeration is exported as an integer via that value's index attribute:

    >>> gym_night = Weekdays.wed
    >>> gym_night.index
    3

An enumeration can be iterated, returning its values in the sequence they were specified when the enumeration was created:

    >>> print [str(day) for day in Weekdays]
    ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']

Values from an enumeration are hashable, and can be used as dict keys:

    >>> plans = {}
    >>> plans[Weekdays.sat] = "Feed the horse"

The normal usage of enumerations is to provide a set of possible values for a data type, which can then be used to map to other information about the values:

    >>> for report_grade in Grades:
    ...     report_students[report_grade] = \
    ...         [s for s in students if s.grade == report_grade]

Rationale -- Other designs considered

All in one class

Some implementations have the enumeration and its values all as attributes of a single object or class. This PEP specifies a design where the enumeration is a container, and the values are simple comparables. It was felt that attempting to place all the properties of enumeration within a single class complicates the design without apparent benefit.

Metaclass for creating enumeration classes

The enumerations specified in this PEP are instances of an enum type. Some alternative designs implement each enumeration as its own class, and use a metaclass to define common properties of all enumerations.

One motivation for having a class (rather than an instance) for each enumeration is to allow subclasses of enumerations, extending and altering an existing enumeration. A class, though, implies that instances of that class will be created; it is difficult to imagine what it means to have separate instances of a "days of the week" class, where each instance contains all days. This usually leads to having each class follow the Singleton pattern, further complicating the design.

In contrast, this PEP specifies enumerations that are not expected to be extended or modified. It is, of course, possible to create a new enumeration from the string values of an existing one, or even to subclass the enum type if desired.

Values related to other types

Some designs express a strong relationship to some other value, such as a particular integer or string, for each enumerated value. This results in using such values in contexts where the enumeration has no meaning, and unnecessarily complicates the design.
The enumerated values specified in this PEP export the values used to create them, and can be compared for equality with any other value, but sequence comparison with values outside the enumeration is explicitly not implemented.

Hiding attributes of enumerated values

A previous design had the enumerated values hiding as much as possible about their implementation, to the point of not exporting the string key and sequence index. The design in this PEP acknowledges that programs will often find it convenient to know the enumerated value's enumeration type, sequence index, and string key specified for the value. These are exported by the enumerated value as attributes.

Implementation

This design is based partly on a recipe[2] from the Python Cookbook. The PyPI package enum[3] provides a Python implementation of the data types described in this PEP.

References and Footnotes

[1] The NotImplemented return value from comparison operations signals the Python interpreter to attempt alternative comparisons or other fallbacks. <http://docs.python.org/reference/datamodel.html#the-standard-type-hierarchy>

[2] "First Class Enums in Python", Zoran Isailovski, Python Cookbook recipe 413486 <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/413486>

[3] Python Package Index, package enum <http://cheeseshop.python.org/pypi/enum/>

Copyright

This document has been placed in the public domain.
PEP: 392
Title: Python 3.2 Release Schedule
Version: $Revision$
Last-Modified: $Date$
Author: Georg Brandl <[email protected]>
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 30-Dec-2009
Python-Version: 3.2

Abstract

This document describes the development and release schedule for the Python 3.2 series. The schedule primarily concerns itself with PEP-sized items.

Release Manager and Crew

- 3.2 Release Manager: Georg Brandl
- Windows installers: Martin v. Loewis
- Mac installers: Ronald Oussoren
- Documentation: Georg Brandl

3.2 Lifespan

3.2 will receive bugfix updates approximately every 4-6 months for approximately 18 months. After the release of 3.3.0 final (see PEP 398), a final 3.2 bugfix update will be released. After that, security updates (source only) will be released until 5 years after the release of 3.2 final, which was planned for February 2016.

As of 2016-02-20, Python 3.2.x reached end-of-life status. The final source release was 3.2.6 in October 2014.

Release Schedule

3.2 schedule

- 3.2 alpha 1: August 1, 2010
- 3.2 alpha 2: September 6, 2010
- 3.2 alpha 3: October 12, 2010
- 3.2 alpha 4: November 16, 2010
- 3.2 beta 1: December 6, 2010 (No new features beyond this point.)
- 3.2 beta 2: December 20, 2010
- 3.2 candidate 1: January 16, 2011
- 3.2 candidate 2: January 31, 2011
- 3.2 candidate 3: February 14, 2011
- 3.2 final: February 20, 2011

3.2.1 schedule

- 3.2.1 beta 1: May 8, 2011
- 3.2.1 candidate 1: May 17, 2011
- 3.2.1 candidate 2: July 3, 2011
- 3.2.1 final: July 11, 2011

3.2.2 schedule

- 3.2.2 candidate 1: August 14, 2011
- 3.2.2 final: September 4, 2011

3.2.3 schedule

- 3.2.3 candidate 1: February 25, 2012
- 3.2.3 candidate 2: March 18, 2012
- 3.2.3 final: April 11, 2012

3.2.4 schedule

- 3.2.4 candidate 1: March 23, 2013
- 3.2.4 final: April 6, 2013

3.2.5 schedule (regression fix release)

- 3.2.5 final: May 13, 2013

-- Only security releases after 3.2.5 --

3.2.6 schedule

- 3.2.6 candidate 1 (source-only release): October 4, 2014
- 3.2.6 final (source-only release): October 11, 2014

Features for 3.2

Note that PEP 3003 is in effect: no changes to language syntax and no additions to the builtins may be made. No large-scale changes have been recorded yet.

Copyright

This document has been placed in the public domain.
PEP: 711 Title: PyBI: a standard format for distributing Python Binaries Author: Nathaniel J. Smith <[email protected]> PEP-Delegate: TODO Discussions-To: https://discuss.python.org/t/pep-711-pybi-a-standard-format-for-distributing-python-binaries/25547 Status: Draft Type: Standards Track Topic: Packaging Content-Type: text/x-rst Created: 06-Apr-2023 Post-History: 06-Apr-2023 Abstract “Like wheels, but instead of a pre-built python package, it’s a pre-built python interpreter” Motivation End goal: Pypi.org has pre-built packages for all Python versions on all popular platforms, so automated tools can easily grab any of them and set it up. It becomes quick and easy to try Python prereleases, pin Python versions in CI, make a temporary environment to reproduce a bug report that only happens on a specific Python point release, etc. First step (this PEP): define a standard packaging file format to hold pre-built Python interpreters, that reuses existing Python packaging standards as much as possible. Examples Example pybi builds are available at pybi.vorpus.org. They're zip files, so you can unpack them and poke around inside if you want to get a feel for how they're laid out. You can also look at the tooling I used to create them. Specification Filename Filename: {distribution}-{version}[-{build tag}]-{platform tag}.pybi This matches the wheel file format defined in PEP 427, except dropping the {python tag} and {abi tag} and changing the extension from .whl → .pybi. For example: - cpython-3.9.3-manylinux_2014.pybi - cpython-3.10b2-win_amd64.pybi Just like for wheels, if a pybi supports multiple platforms, you can separate them by dots to make a “compressed tag set”: - cpython-3.9.5-macosx_11_0_x86_64.macosx_11_0_arm64.pybi (Though in practice this probably won’t be used much, e.g. the above filename is more idiomatically written as cpython-3.9.5-macosx_11_0_universal2.pybi.) File contents A .pybi file is a zip file, that can be unpacked directly into an arbitrary location and then used as a self-contained Python environment. There’s no .data directory or install scheme keys, because the Python environment knows which install scheme it’s using, so it can just put things in the right places to start with. The “arbitrary location” part is important: the pybi can’t contain any hardcoded absolute paths. In particular, any preinstalled scripts MUST NOT embed absolute paths in their shebang lines. Similar to wheels’ <package>-<version>.dist-info directory, the pybi archive must contain a top-level directory named pybi-info/. (Rationale: calling it pybi-info instead dist-info makes sure that tools don’t get confused about which kind of metadata they’re looking at; leaving off the {name}-{version} part is fine because only one pybi can be installed into a given directory.) The pybi-info/ directory contains at least the following files: - .../PYBI: metadata about the archive itself, in the same RFC822-ish format as METADATA and WHEEL files: Pybi-Version: 1.0 Generator: {name} {version} Tag: {platform tag} Tag: {another platform tag} Tag: {...and so on...} Build: 1 # optional - .../RECORD: same as in wheels, except see the note about symlinks, below. - .../METADATA: In the same format as described in the current core metadata spec, except that the following keys are forbidden because they don’t make sense: - Requires-Dist - Provides-Extra - Requires-Python And also there are some new, required keys described below. 
Pybi-specific core metadata Here's an example of the new METADATA fields, before we give the full details: Pybi-Environment-Marker-Variables: {"implementation_name": "cpython", "implementation_version": "3.10.8", "os_name": "posix", "platform_machine": "x86_64", "platform_system": "Linux", "python_full_version": "3.10.8", "platform_python_implementation": "CPython", "python_version": "3.10", "sys_platform": "linux"} Pybi-Paths: {"stdlib": "lib/python3.10", "platstdlib": "lib/python3.10", "purelib": "lib/python3.10/site-packages", "platlib": "lib/python3.10/site-packages", "include": "include/python3.10", "platinclude": "include/python3.10", "scripts": "bin", "data": "."} Pybi-Wheel-Tag: cp310-cp310-PLATFORM Pybi-Wheel-Tag: cp310-abi3-PLATFORM Pybi-Wheel-Tag: cp310-none-PLATFORM Pybi-Wheel-Tag: cp39-abi3-PLATFORM Pybi-Wheel-Tag: cp38-abi3-PLATFORM Pybi-Wheel-Tag: cp37-abi3-PLATFORM Pybi-Wheel-Tag: cp36-abi3-PLATFORM Pybi-Wheel-Tag: cp35-abi3-PLATFORM Pybi-Wheel-Tag: cp34-abi3-PLATFORM Pybi-Wheel-Tag: cp33-abi3-PLATFORM Pybi-Wheel-Tag: cp32-abi3-PLATFORM Pybi-Wheel-Tag: py310-none-PLATFORM Pybi-Wheel-Tag: py3-none-PLATFORM Pybi-Wheel-Tag: py39-none-PLATFORM Pybi-Wheel-Tag: py38-none-PLATFORM Pybi-Wheel-Tag: py37-none-PLATFORM Pybi-Wheel-Tag: py36-none-PLATFORM Pybi-Wheel-Tag: py35-none-PLATFORM Pybi-Wheel-Tag: py34-none-PLATFORM Pybi-Wheel-Tag: py33-none-PLATFORM Pybi-Wheel-Tag: py32-none-PLATFORM Pybi-Wheel-Tag: py31-none-PLATFORM Pybi-Wheel-Tag: py30-none-PLATFORM Pybi-Wheel-Tag: py310-none-any Pybi-Wheel-Tag: py3-none-any Pybi-Wheel-Tag: py39-none-any Pybi-Wheel-Tag: py38-none-any Pybi-Wheel-Tag: py37-none-any Pybi-Wheel-Tag: py36-none-any Pybi-Wheel-Tag: py35-none-any Pybi-Wheel-Tag: py34-none-any Pybi-Wheel-Tag: py33-none-any Pybi-Wheel-Tag: py32-none-any Pybi-Wheel-Tag: py31-none-any Pybi-Wheel-Tag: py30-none-any Specification: - Pybi-Environment-Marker-Variables: The value of all PEP 508 environment marker variables that are static across installs of this Pybi, as a JSON dict. So for example: - python_version will always be present, because a Python 3.10 package always has python_version == "3.10". - platform_version will generally not be present, because it gives detailed information about the OS where Python is running, for example: #60-Ubuntu SMP Thu May 6 07:46:32 UTC 2021 platform_release has similar issues. - platform_machine will usually be present, except for macOS universal2 pybis: these can potentially be run in either x86-64 or arm64 mode, and we don't know which until the interpreter is actually invoked, so we can't record it in static metadata. Rationale: In many cases, this should allow a resolver running on Linux to compute package pins for a Python environment on Windows, or vice-versa, so long as the resolver has access to the target platform’s .pybi file. (Note that Requires-Python constraints can be checked by using the python_full_version value.) While we have to leave out a few keys sometimes, they're either fairly useless (platform_version, platform_release) or can be reconstructed by the resolver (platform_machine). The markers are also just generally useful information to have accessible. For example, if you have a pypy3-7.3.2 pybi, and you want to know what version of the Python language that supports, then that’s recorded in the python_version marker. (Note: we may want to deprecate/remove platform_version and platform_release? They're problematic and I can't figure out any cases where they're useful. But that's out of scope of this particular PEP.) 
- Pybi-Paths: The install paths needed to install wheels (same keys as sysconfig.get_paths()), as relative paths starting at the root of the zip file, as a JSON dict. These paths MUST be written in Unix format, using forward slashes as a separator, not backslashes. It must be possible to invoke the Python interpreter by running {paths["scripts"]}/python. If there are alternative interpreter entry points (e.g. pythonw for Windows GUI apps), then they should also be in that directory under their conventional names, with no version number attached. (You can also have a python3.11 symlink if you want; there’s no rule against that. It’s just that python has to exist and work.) Rationale: Pybi-Paths and Pybi-Wheel-Tags (see below) are together enough to let an installer choose wheels and install them into an unpacked pybi environment, without invoking Python. Besides, we need to write down the interpreter location somewhere, so it’s two birds with one stone. - Pybi-Wheel-Tag: The wheel tags supported by this interpreter, in preference order (most-preferred first, least-preferred last), except that the special platform tag PLATFORM should replace any platform tags that depend on the final installation system. Discussion: It would be nice™ if installers could compute a pybi’s corresponding wheel tags ahead of time, so that they could install wheels into the unpacked pybi without needing to actually invoke the python interpreter to query its tags – both for efficiency and to allow for more exotic use cases like setting up a Windows environment from a Linux host. But unfortunately, it’s impossible to compute the full set of platform tags supported by a Python installation ahead of time, because they can depend on the final system: - A pybi tagged manylinux_2_12_x86_64 can always use wheels tagged as manylinux_2_12_x86_64. It also might be able to use wheels tagged manylinux_2_17_x86_64, but only if the final installation system has glibc 2.17+. - A pybi tagged macosx_11_0_universal2 (= x86-64 + arm64 support in the same binary) might be able to use wheels tagged as macosx_11_0_arm64, but only if it’s installed on an “Apple Silicon” machine and running in arm64 mode. In these two cases, an installation tool can still work out the appropriate set of wheel tags by computing the local platform tags, taking the wheel tag templates from Pybi-Wheel-Tag, and swapping in the actual supported platforms in place of the magic PLATFORM string. However, there are other cases that are even more complicated: - You can (usually) run both 32- and 64-bit apps on 64-bit Windows. So a pybi installer might compute the set of allowable pybi tags on the current platform as [win32, win_amd64]. But you can’t then just take that set and swap it into the pybi’s wheel tag template or you get nonsense: [ "cp39-cp39-win32", "cp39-cp39-win_amd64", "cp39-abi3-win32", "cp39-abi3-win_amd64", ... ] To handle this, the installer needs to somehow understand that a manylinux_2_12_x86_64 pybi can use a manylinux_2_17_x86_64 wheel as long as those are both valid tags on the current machine, but a win32 pybi can’t use a win_amd64 wheel, even if those are both valid tags on the current machine. - A pybi tagged macosx_11_0_universal2 might be able to use wheels tagged as macosx_11_0_x86_64, but only if it’s installed on an x86-64 machine or it’s installed on an ARM machine and the interpreter is invoked with the magic incantation that tells macOS to run a binary in x86-64 mode. So how the installer plans to invoke the pybi matters too! 
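For the straightforward cases, though, the substitution itself is mechanical. A rough sketch, with the templates and the locally-supported platform tags hardcoded purely for illustration:

    # Hypothetical inputs: templates from Pybi-Wheel-Tag, plus the
    # platform tags the installer has computed for the local system.
    templates = [
        "cp310-cp310-PLATFORM",
        "cp310-abi3-PLATFORM",
        "py3-none-any",
    ]
    local_platforms = ["manylinux_2_17_x86_64", "manylinux_2_12_x86_64"]

    wheel_tags = []
    for template in templates:
        if "PLATFORM" in template:
            wheel_tags += [template.replace("PLATFORM", p)
                           for p in local_platforms]
        else:
            wheel_tags.append(template)
    # wheel_tags is now in preference order, most-preferred first.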
So actually using Pybi-Wheel-Tag values is less trivial than it might seem, and they're probably only useful with fairly sophisticated tooling. But, smart pybi installers will already have to understand a lot of these platform compatibility issues in order to select a working pybi, and for the cross-platform pinning/environment building case, users can potentially provide whatever information is needed to disambiguate exactly what platform they're targeting. So, it's still useful enough to include in the PyBI metadata -- tools that don't find it useful can simply ignore it.

You can probably generate these metadata values by running this script on the built interpreter:

    import packaging.markers
    import packaging.tags
    import sysconfig
    import os.path
    import json
    import sys

    marker_vars = packaging.markers.default_environment()
    # Delete any keys that depend on the final installation
    del marker_vars["platform_release"]
    del marker_vars["platform_version"]
    # Darwin binaries are often multi-arch, so play it safe and
    # delete the architecture marker. (Better would be to only
    # do this if the pybi actually is multi-arch.)
    if marker_vars["sys_platform"] == "darwin":
        del marker_vars["platform_machine"]

    # Copied and tweaked version of packaging.tags.sys_tags
    tags = []
    interp_name = packaging.tags.interpreter_name()
    if interp_name == "cp":
        tags += list(packaging.tags.cpython_tags(platforms=["xyzzy"]))
    else:
        tags += list(packaging.tags.generic_tags(platforms=["xyzzy"]))

    tags += list(packaging.tags.compatible_tags(platforms=["xyzzy"]))

    # Gross hack: packaging.tags normalizes platforms by lowercasing them,
    # so we generate the tags with a unique string and then replace it
    # with our special uppercase placeholder.
    str_tags = [str(t).replace("xyzzy", "PLATFORM") for t in tags]

    (base_path,) = sysconfig.get_config_vars("installed_base")
    # For some reason, macOS framework builds report their
    # installed_base as a directory deep inside the framework.
    while "Python.framework" in base_path:
        base_path = os.path.dirname(base_path)
    paths = {key: os.path.relpath(path, base_path).replace("\\", "/")
             for (key, path) in sysconfig.get_paths().items()}

    json.dump({"marker_vars": marker_vars,
               "tags": str_tags,
               "paths": paths},
              sys.stdout)

This emits a JSON dict on stdout with separate entries for each set of pybi-specific tags.

Symlinks

Currently, symlinks are used by default in all Unix Python installs (e.g., bin/python3 -> bin/python3.9). And furthermore, symlinks are required to store macOS framework builds in .pybi files. So, unlike wheel files, we absolutely have to support symlinks in .pybi files for them to be useful at all.

Representing symlinks in zip files

The de-facto standard for representing symlinks in zip files is the Info-Zip symlink extension, which works as follows:

- The symlink's target path is stored as if it were the file contents
- The top 4 bits of the Unix permissions field are set to 0xa, i.e.: permissions & 0xf000 == 0xa000
- The Unix permissions field, in turn, is stored as the top 16 bits of the "external attributes" field.

So if using Python's zipfile module, you can check whether a ZipInfo represents a symlink by doing:

    (zip_info.external_attr >> 16) & 0xf000 == 0xa000

Or if using Rust's zip crate, the equivalent check is:

    fn is_symlink(zip_file: &zip::ZipFile) -> bool {
        match zip_file.unix_mode() {
            Some(mode) => mode & 0xf000 == 0xa000,
            None => false,
        }
    }

If you're on Unix, your zip and unzip commands probably understand this format already.
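For example, a pybi creator could write and detect such entries with the standard zipfile module along these lines (a minimal, non-normative sketch; the archive name and symlink are made up):

    import stat
    import zipfile

    def add_symlink(zf, name, target):
        # Store the target as the entry's "contents", and mark the entry
        # as a symlink in the upper 16 bits of external_attr (Unix mode).
        info = zipfile.ZipInfo(name)
        info.external_attr = (stat.S_IFLNK | 0o777) << 16
        zf.writestr(info, target)

    def is_symlink(info):
        return stat.S_ISLNK(info.external_attr >> 16)

    with zipfile.ZipFile('demo.zip', 'w') as zf:
        add_symlink(zf, 'bin/python3', 'python3.11')

    with zipfile.ZipFile('demo.zip') as zf:
        for info in zf.infolist():
            if is_symlink(info):
                print(info.filename, '->', zf.read(info).decode())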
Representing symlinks in RECORD files

Normally, a RECORD file lists each file + its hash + its length:

    my/favorite/file,sha256=...,12345

For symlinks, we instead write:

    name/of/symlink,symlink=path/to/symlink/target,

That is: we use a special "hash function" called symlink, and then store the actual symlink target as the "hash value". And the length is left empty.

Rationale: we're already committed to the RECORD file containing a redundant check on everything in the main archive, so for symlinks we at least need to store some kind of hash, plus some kind of flag to indicate that this is a symlink. Given that symlink target strings are roughly the same size as a hash, we might as well store them directly. This also makes the symlink information easier to access for tools that don't understand the Info-Zip symlink extension, and makes it possible to losslessly unpack and repack a Unix pybi on a Windows system, which someone might find handy at some point.

Storing symlinks in pybi files

When a pybi creator stores a symlink, they MUST use both of the mechanisms defined above: storing it in the zip archive directly using the Info-Zip representation, and also recording it in the RECORD file. Pybi consumers SHOULD validate that the symlinks in the archive and RECORD file are consistent with each other.

We also considered using only the RECORD file to store symlinks, but then the vanilla unzip tool wouldn't be able to unpack them, and that would make it hard to install a pybi from a shell script.

Limitations

Symlinks enable a lot of potential messiness. To keep things under control, we impose the following restrictions:

- Symlinks MUST NOT be used in .pybis targeting Windows, or other platforms that are missing first-class symlink support.
- Symlinks MUST NOT be used inside the pybi-info directory. (Rationale: there's no need, and it makes things simpler for resolvers that need to extract info from pybi-info without unpacking the whole archive.)
- Symlink targets MUST be relative paths, and MUST be inside the pybi directory.
- If A/B/... is recorded as a symlink in the archive, then there MUST NOT be any other entries in the archive named like A/B/.../C. For example, if an archive has a symlink foo -> bar, and then later in the archive there's a regular file named foo/blah.py, then a naive unpacker could potentially end up writing a file called bar/blah.py. Don't be naive.

Unpackers MUST verify that these rules are followed, because without them attackers could create evil symlinks like foo -> /etc/passwd or foo -> ../../../../../etc + foo/passwd -> ... and cause havoc.
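As a sketch of the kind of validation an unpacker might perform (hypothetical helper names; the RECORD text is made up for illustration):

    import csv
    import posixpath

    def symlinks_from_record(record_text):
        # RECORD rows are "path,hash,size"; symlinks use the special
        # "symlink=<target>" pseudo-hash and an empty size field.
        links = {}
        for path, hash_, size in csv.reader(record_text.splitlines()):
            if hash_.startswith('symlink='):
                links[path] = hash_[len('symlink='):]
        return links

    def check_target(link, target):
        # Enforce "relative path, stays inside the pybi directory".
        if posixpath.isabs(target):
            raise ValueError('%s: absolute symlink target' % link)
        resolved = posixpath.normpath(
            posixpath.join(posixpath.dirname(link), target))
        if resolved == '..' or resolved.startswith('../'):
            raise ValueError('%s: target escapes the pybi directory' % link)

    record = 'bin/python3,symlink=python3.11,\nlib/x.py,sha256=abc,10\n'
    for link, target in symlinks_from_record(record).items():
        check_target(link, target)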
So what's special about PyPI and wheels is that they're designed to describe dependencies in a standard, abstract way, that all these downstream systems can consume and convert into their local conventions. That's why package maintainers use Python-specific metadata and upload to PyPI: because it lets them address all of those systems simultaneously. Every time you build a Python package for conda, there's an intermediate wheel that's generated, because wheels are the common language that Python package build systems and conda can use to talk to each other. But then, if you're a maintainer releasing an sdist+wheels, then you naturally want to test what you're releasing, which may depend on arbitrary PyPI packages and versions. So you need tools that build Python environments directly from PyPI, and conda is fundamentally not designed to do that. So conda and pip are both necessary for different cases, and this proposal happens to be targeting the pip side of that equation. Sdists (or not) It might be cool to have an “sdist” equivalent for pybis, i.e., some kind of format for a Python source release that’s structured-enough to let tools automatically fetch and build it into a pybi, for platforms where prebuilt pybis aren’t available. But, this isn’t necessary for the MVP and opens a can of worms, so let’s worry about it later. What packages should be bundled inside a pybi? Pybi builders have the power to pick and choose what exactly goes inside. For example, you could include some preinstalled packages in the pybi’s site-packages directory, or prune out bits of the stdlib that you don’t want. We can’t stop you! Though if you do preinstall packages, then it's strongly recommended to also include the correct metadata (.dist-info etc.), so that it’s possible for Pip or other tools to understand out what’s going on. For my prototype “general purpose” pybi’s, what I chose is: - Make sure site-packages is empty. Rationale: for traditional standalone python installers that are targeted at end-users, you probably want to include at least pip, to avoid bootstrapping issues (PEP 453). But pybis are different: they’re designed to be installed by “smart” tooling, that consume the pybi as part of some kind of larger automated deployment process. It’s easier for these installers to start from a blank slate and then add whatever they need, than for them to start with some preinstalled packages that they may or may not want. (And besides, you can still run python -m ensurepip.) - Include the full stdlib, except for test. Rationale: the top-level test module contains CPython’s own test suite. It’s huge (CPython without test is ~37 MB, then test adds another ~25 MB on top of that!), and essentially never used by regular user code. Also, as precedent, the official nuget packages, the official manylinux images, and multiple Linux distributions all leave it out, and this hasn’t caused any major problems. So this seems like the best way to balance broad compatibility with reasonable download/install sizes. - I’m not shipping any .pyc files. They take up space in the download, can be generated on the final system at minimal cost, and dropping them removes a source of location-dependence. (.pyc files store the absolute path of the corresponding .py file and include it in tracebacks; but, pybis are relocatable, so the correct path isn’t known until after install.) Backwards Compatibility No backwards compatibility considerations. 
Security Implications No security implications, beyond the fact that anyone who takes it upon themselves to distribute binaries has to come up with a plan to manage their security (e.g., whether they roll a new build after an OpenSSL CVE drops). But collectively, we core Python folks are already maintaining binary builds for all major platforms (macOS + Windows through python.org, and Linux builds through the official manylinux image), so even if we do start releasing official CPython builds on PyPI it doesn't really raise any new security issues. How to Teach This This isn't targeted at end-users; their experience will simply be that e.g. their pyenv or tox invocation magically gets faster and more reliable (if those projects' maintainers decide to take advantage of this PEP). Copyright This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 566
Title: Metadata for Python Software Packages 2.1
Author: Dustin Ingram <[email protected]>
BDFL-Delegate: Daniel Holth
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 01-Dec-2017
Python-Version: 3.x
Post-History:
Replaces: 345
Resolution: https://mail.python.org/pipermail/distutils-sig/2018-February/032014.html

packaging:core-metadata

Abstract

This PEP describes the changes between versions 1.2 and 2.1 of the core metadata specification for Python packages. Version 1.2 is specified in PEP 345. It also changes the canonical source for field specifications to the Core Metadata Specification reference document, which includes specifics of the field names, and their semantics and usage.

Fields

The canonical source for the names and semantics of each of the supported metadata fields is the Core Metadata Specification document.

Fields marked with "(Multiple use)" may be specified multiple times in a single PKG-INFO file. Other fields may only occur once in a PKG-INFO file. Fields marked with "(optional)" are not required to appear in a valid PKG-INFO file; all other fields must be present.

New in Version 2.1

Description-Content-Type (optional)

A string stating the markup syntax (if any) used in the distribution's description, so that tools can intelligently render the description.

Historically, tools like PyPI assume that a package's description is formatted in reStructuredText (reST), and fall back on plain text if the description is not valid reST. The introduction of this field allows PyPI to support additional types of markup syntax, and not need to make this assumption. The full specification for this field is defined in the Core Metadata Specification.

Provides-Extra (optional, multiple use)

A string containing the name of an optional feature. Must be a valid Python identifier. May be used to make a dependency conditional on whether the optional feature has been requested.

The introduction of this field allows package installation tools (such as pip) to determine which extras are provided by a given package, and package publication tools (such as twine) to check for issues with environment markers which use extras. The full specification for this field is defined in the Core Metadata Specification.

Changed in Version 2.1

Name

The specification for the format of this field is now identical to the distribution name specification defined in PEP 508.

Description

In addition to the Description header field, the distribution's description may instead be provided in the message body (i.e., after a completely blank line following the headers, with no indentation or other special formatting necessary).

Version Specifiers

Version numbering requirements and the semantics for specifying comparisons between versions are defined in PEP 440. Direct references as defined in PEP 440 are also permitted as an alternative to version specifiers.

Following PEP 508, version specifiers no longer need to be surrounded by parentheses in the fields Requires-Dist, Provides-Dist, Obsoletes-Dist or Requires-External, so e.g. requests >= 2.8.1 is now a valid value. The recommended format is without parentheses, but tools parsing metadata should also be able to handle version specifiers in parentheses. Further, public index servers MAY prohibit strict version matching clauses or direct references in these fields. Usage of version specifiers is otherwise unchanged from PEP 345.
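For illustration (packaging is a third-party library, not part of this PEP), the recommended parenthesis-free form, including an attached environment marker, can be parsed like this:

    from packaging.requirements import Requirement

    req = Requirement('requests >= 2.8.1; python_version >= "3.5"')
    print(req.name)       # requests
    print(req.specifier)  # >=2.8.1
    print(req.marker)     # python_version >= "3.5"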
Environment markers

An environment marker is a marker that can be added at the end of a field after a semi-colon (";"), to add a condition about the execution environment. The environment marker format used to declare such a condition is defined in the environment markers section of PEP 508. Usage of environment markers is otherwise unchanged from PEP 345.

JSON-compatible Metadata

It may be necessary to store metadata in a data structure which does not allow for multiple repeated keys, such as JSON. The canonical method to transform metadata fields into such a data structure is as follows:

1. The original key-value format should be read with email.parser.HeaderParser;
2. All transformed keys should be reduced to lower case. Hyphens should be replaced with underscores, but otherwise should retain all other characters;
3. The transformed value for any field marked with "(Multiple use)" should be a single list containing all the original values for the given key;
4. The Keywords field should be converted to a list by splitting the original value on commas;
5. The message body, if present, should be set to the value of the description key.
6. The result should be stored as a string-keyed dictionary.

Summary of Differences From PEP 345

- Metadata-Version is now 2.1.
- Fields are now specified via the Core Metadata Specification.
- Added two new fields: Description-Content-Type and Provides-Extra
- Acceptable values for the Name field are now specified as per PEP 508.
- Added canonical method of transformation into JSON-compatible data structure.

References

This document specifies version 2.1 of the metadata format. Version 1.0 is specified in PEP 241. Version 1.1 is specified in PEP 314. Version 1.2 is specified in PEP 345. Version 2.0, while not formally accepted, was specified in PEP 426.

Copyright

This document has been placed in the public domain.

Acknowledgements

Thanks to Alyssa Coghlan and Thomas Kluyver for contributing to this PEP.
PEP: 567
Title: Context Variables
Version: $Revision$
Last-Modified: $Date$
Author: Yury Selivanov <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 12-Dec-2017
Python-Version: 3.7
Post-History: 12-Dec-2017, 28-Dec-2017, 16-Jan-2018

Abstract

This PEP proposes a new contextvars module and a set of new CPython C APIs to support context variables. This concept is similar to thread-local storage (TLS), but, unlike TLS, it also allows correctly keeping track of values per asynchronous task, e.g. asyncio.Task.

This proposal is a simplified version of PEP 550. The key difference is that this PEP is concerned only with solving the case for asynchronous tasks, not for generators. There are no proposed modifications to any built-in types or to the interpreter.

This proposal is not strictly related to Python Context Managers, although it does provide a mechanism that can be used by Context Managers to store their state.

API Design and Implementation Revisions

In Python 3.7.1 the signatures of all context variables C APIs were changed to use PyObject * pointers instead of PyContext *, PyContextVar *, and PyContextToken *, e.g.:

    // in 3.7.0:
    PyContext *PyContext_New(void);

    // in 3.7.1+:
    PyObject *PyContext_New(void);

See[1] for more details. The C API section of this PEP was updated to reflect the change.

Rationale

Thread-local variables are insufficient for asynchronous tasks that execute concurrently in the same OS thread. Any context manager that saves and restores a context value using threading.local() will have its context values bleed to other code unexpectedly when used in async/await code.

A few examples where having a working context local storage for asynchronous code is desirable:

- Context managers like decimal contexts and numpy.errstate.
- Request-related data, such as security tokens and request data in web applications, language context for gettext, etc.
- Profiling, tracing, and logging in large code bases.

Introduction

The PEP proposes a new mechanism for managing context variables. The key classes involved in this mechanism are contextvars.Context and contextvars.ContextVar. The PEP also proposes some policies for using the mechanism around asynchronous tasks.

The proposed mechanism for accessing context variables uses the ContextVar class. A module (such as decimal) that wishes to use the new mechanism should:

- declare a module-global variable holding a ContextVar to serve as a key;
- access the current value via the get() method on the key variable;
- modify the current value via the set() method on the key variable.

The notion of "current value" deserves special consideration: different asynchronous tasks that exist and execute concurrently may have different values for the same key. This idea is well known from thread-local storage, but in this case the locality of the value is not necessarily bound to a thread. Instead, there is the notion of the "current Context", which is stored in thread-local storage. Manipulation of the current context is the responsibility of the task framework, e.g. asyncio.

A Context is a mapping of ContextVar objects to their values. The Context itself exposes the abc.Mapping interface (not abc.MutableMapping!), so it cannot be modified directly. To set a new value for a context variable in a Context object, the user needs to:

- make the Context object "current" using the Context.run() method;
- use ContextVar.set() to set a new value for the context variable.
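A short runnable illustration of these two steps (it uses only the contextvars APIs specified below; the variable and function names are made up for the example):

    import contextvars

    var = contextvars.ContextVar('var', default=0)

    def modify():
        # Runs with ctx as the current context, so this set()
        # is recorded in ctx, not in the caller's context.
        var.set(1)
        return var.get()

    ctx = contextvars.copy_context()
    print(ctx.run(modify))  # 1
    print(var.get())        # 0 -- the outer context is unchanged
    print(ctx[var])         # 1 -- the change is contained in ctx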
The ContextVar.get() method looks for the variable in the current Context object using self as a key.

It is not possible to get a direct reference to the current Context object, but it is possible to obtain a shallow copy of it using the contextvars.copy_context() function. This ensures that the caller of Context.run() is the sole owner of its Context object.

Specification

A new standard library module contextvars is added with the following APIs:

1. The copy_context() -> Context function is used to get a copy of the current Context object for the current OS thread.
2. The ContextVar class to declare and access context variables.
3. The Context class encapsulates context state. Every OS thread stores a reference to its current Context instance. It is not possible to control that reference directly. Instead, the Context.run(callable, *args, **kwargs) method is used to run Python code in another context.

contextvars.ContextVar

The ContextVar class has the following constructor signature: ContextVar(name, *, default=_NO_DEFAULT). The name parameter is used for introspection and debug purposes, and is exposed as a read-only ContextVar.name attribute. The default parameter is optional. Example:

    # Declare a context variable 'var' with the default value 42.
    var = ContextVar('var', default=42)

(The _NO_DEFAULT is an internal sentinel object used to detect if the default value was provided.)

ContextVar.get(default=_NO_DEFAULT) returns a value for the context variable for the current Context:

    # Get the value of `var`.
    var.get()

If there is no value for the variable in the current context, ContextVar.get() will:

- return the value of the default argument of the get() method, if provided; or
- return the default value for the context variable, if provided; or
- raise a LookupError.

ContextVar.set(value) -> Token is used to set a new value for the context variable in the current Context:

    # Set the variable 'var' to 1 in the current context.
    var.set(1)

ContextVar.reset(token) is used to reset the variable in the current context to the value it had before the set() operation that created the token (or to remove the variable if it was not set):

    # Assume: var.get(None) is None

    # Set 'var' to 1:
    token = var.set(1)
    try:
        # var.get() == 1
    finally:
        var.reset(token)

    # After reset: var.get(None) is None,
    # i.e. 'var' was removed from the current context.

The ContextVar.reset() method raises:

- a ValueError if it is called with a token object created by another variable;
- a ValueError if the current Context object does not match the one where the token object was created;
- a RuntimeError if the token object has already been used once to reset the variable.

contextvars.Token

contextvars.Token is an opaque object that should be used to restore the ContextVar to its previous value, or to remove it from the context if the variable was not set before. It can be created only by calling ContextVar.set().

For debug and introspection purposes it has:

- a read-only attribute Token.var pointing to the variable that created the token;
- a read-only attribute Token.old_value set to the value the variable had before the set() call, or to Token.MISSING if the variable wasn't set before.

contextvars.Context

A Context object is a mapping of context variables to values. Context() creates an empty context.
To get a copy of the current Context for the current OS thread, use the contextvars.copy_context() function:

    ctx = contextvars.copy_context()

To run Python code in some Context, use the Context.run() method:

    ctx.run(function)

Any changes to any context variables that function causes will be contained in the ctx context:

    var = ContextVar('var')
    var.set('spam')

    def main():
        # 'var' was set to 'spam' before
        # calling 'copy_context()' and 'ctx.run(main)', so:
        # var.get() == ctx[var] == 'spam'

        var.set('ham')

        # Now, after setting 'var' to 'ham':
        # var.get() == ctx[var] == 'ham'

    ctx = copy_context()

    # Any changes that the 'main' function makes to 'var'
    # will be contained in 'ctx'.
    ctx.run(main)

    # The 'main()' function was run in the 'ctx' context,
    # so changes to 'var' are contained in it:
    # ctx[var] == 'ham'

    # However, outside of 'ctx', 'var' is still set to 'spam':
    # var.get() == 'spam'

Context.run() raises a RuntimeError when called on the same context object from more than one OS thread, or when called recursively.

Context.copy() returns a shallow copy of the context object.

Context objects implement the collections.abc.Mapping ABC. This can be used to introspect contexts:

    ctx = contextvars.copy_context()

    # Print all context variables and their values in 'ctx':
    print(ctx.items())

    # Print the value of 'some_variable' in context 'ctx':
    print(ctx[some_variable])

Note that all Mapping methods, including Context.__getitem__ and Context.get, ignore default values for context variables (i.e. ContextVar.default). This means that for a variable var that was created with a default value and was not set in the context:

- context[var] raises a KeyError,
- var in context returns False,
- the variable isn't included in context.items(), etc.

asyncio

asyncio uses Loop.call_soon(), Loop.call_later(), and Loop.call_at() to schedule the asynchronous execution of a function. asyncio.Task uses call_soon() to run the wrapped coroutine.

We modify Loop.call_{at,later,soon} and Future.add_done_callback() to accept the new optional context keyword-only argument, which defaults to the current context:

    def call_soon(self, callback, *args, context=None):
        if context is None:
            context = contextvars.copy_context()

        # ... some time later
        context.run(callback, *args)

Tasks in asyncio need to maintain their own context that they inherit from the point they were created at. asyncio.Task is modified as follows:

    class Task:
        def __init__(self, coro):
            ...
            # Get the current context snapshot.
            self._context = contextvars.copy_context()
            self._loop.call_soon(self._step, context=self._context)

        def _step(self, exc=None):
            ...
            # Every advance of the wrapped coroutine is done in
            # the task's context.
            self._loop.call_soon(self._step, context=self._context)
            ...

Implementation

This section explains high-level implementation details in pseudo-code. Some optimizations are omitted to keep this section short and clear.

The Context mapping is implemented using an immutable dictionary. This allows for an O(1) implementation of the copy_context() function. The reference implementation implements the immutable dictionary using Hash Array Mapped Tries (HAMT); see PEP 550 for an analysis of HAMT performance[2].
Implementation

This section explains high-level implementation details in pseudo-code. Some optimizations are omitted to keep this section short and clear.

The Context mapping is implemented using an immutable dictionary. This allows for a O(1) implementation of the copy_context() function. The reference implementation implements the immutable dictionary using Hash Array Mapped Tries (HAMT); see PEP 550 for analysis of HAMT performance[2].

For the purposes of this section, we implement an immutable dictionary using a copy-on-write approach and the built-in dict type:

    class _ContextData:

        def __init__(self):
            self._mapping = dict()

        def __getitem__(self, key):
            return self._mapping[key]

        def __contains__(self, key):
            return key in self._mapping

        def __len__(self):
            return len(self._mapping)

        def __iter__(self):
            return iter(self._mapping)

        def set(self, key, value):
            copy = _ContextData()
            copy._mapping = self._mapping.copy()
            copy._mapping[key] = value
            return copy

        def delete(self, key):
            copy = _ContextData()
            copy._mapping = self._mapping.copy()
            del copy._mapping[key]
            return copy

Every OS thread has a reference to the current Context object:

    class PyThreadState:
        context: Context

contextvars.Context is a wrapper around _ContextData:

    class Context(collections.abc.Mapping):

        _data: _ContextData
        _prev_context: Optional[Context]

        def __init__(self):
            self._data = _ContextData()
            self._prev_context = None

        def run(self, callable, *args, **kwargs):
            if self._prev_context is not None:
                raise RuntimeError(
                    f'cannot enter context: {self} is already entered')

            ts: PyThreadState = PyThreadState_Get()
            self._prev_context = ts.context
            try:
                ts.context = self
                return callable(*args, **kwargs)
            finally:
                ts.context = self._prev_context
                self._prev_context = None

        def copy(self):
            new = Context()
            new._data = self._data
            return new

        # Implement abstract Mapping.__getitem__
        def __getitem__(self, var):
            return self._data[var]

        # Implement abstract Mapping.__contains__
        def __contains__(self, var):
            return var in self._data

        # Implement abstract Mapping.__len__
        def __len__(self):
            return len(self._data)

        # Implement abstract Mapping.__iter__
        def __iter__(self):
            return iter(self._data)

        # The rest of the Mapping methods are implemented
        # by collections.abc.Mapping.

contextvars.copy_context() is implemented as follows:

    def copy_context():
        ts: PyThreadState = PyThreadState_Get()
        return ts.context.copy()

contextvars.ContextVar interacts with PyThreadState.context directly:

    class ContextVar:

        def __init__(self, name, *, default=_NO_DEFAULT):
            self._name = name
            self._default = default

        @property
        def name(self):
            return self._name

        def get(self, default=_NO_DEFAULT):
            ts: PyThreadState = PyThreadState_Get()
            try:
                return ts.context[self]
            except KeyError:
                pass

            if default is not _NO_DEFAULT:
                return default

            if self._default is not _NO_DEFAULT:
                return self._default

            raise LookupError

        def set(self, value):
            ts: PyThreadState = PyThreadState_Get()

            data: _ContextData = ts.context._data
            try:
                old_value = data[self]
            except KeyError:
                old_value = Token.MISSING

            updated_data = data.set(self, value)
            ts.context._data = updated_data
            return Token(ts.context, self, old_value)

        def reset(self, token):
            if token._used:
                raise RuntimeError("Token has already been used once")

            if token._var is not self:
                raise ValueError(
                    "Token was created by a different ContextVar")

            ts: PyThreadState = PyThreadState_Get()

            if token._context is not ts.context:
                raise ValueError(
                    "Token was created in a different Context")

            if token._old_value is Token.MISSING:
                ts.context._data = ts.context._data.delete(token._var)
            else:
                ts.context._data = ts.context._data.set(token._var,
                                                        token._old_value)

            token._used = True

Note that in the reference implementation, ContextVar.get() has an internal cache for the most recent value, which makes it possible to bypass a hash lookup. This is similar to the optimization the decimal module implements to retrieve its context from PyThreadState_GetDict(). See PEP 550, which explains the implementation of the cache in great detail.
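Before moving on to Token, a short demonstration of why the copy-on-write _ContextData above makes copy_context() cheap and safe: set() returns a new mapping and never mutates the old one, so a copied Context can share data with the original until either is modified. (This sketch uses the pseudo-code class from this section; the string keys stand in for real ContextVar objects.)

    data1 = _ContextData()
    data2 = data1.set('var', 'spam')    # a new mapping; data1 is untouched
    data3 = data2.set('var', 'ham')     # again, data2 is untouched

    assert 'var' not in data1
    assert data2['var'] == 'spam'
    assert data3['var'] == 'ham'

    # Context.copy() only copies the reference to the mapping,
    # which is what makes copy_context() O(1).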
The Token class is implemented as follows:

    class Token:

        MISSING = object()

        def __init__(self, context, var, old_value):
            self._context = context
            self._var = var
            self._old_value = old_value
            self._used = False

        @property
        def var(self):
            return self._var

        @property
        def old_value(self):
            return self._old_value

Summary of the New APIs

Python API

1. A new contextvars module with ContextVar, Context, and Token classes, and a copy_context() function.
2. asyncio.Loop.call_at(), asyncio.Loop.call_later(), asyncio.Loop.call_soon(), and asyncio.Future.add_done_callback() run callback functions in the context they were called in. A new context keyword-only parameter can be used to specify a custom context.
3. asyncio.Task is modified internally to maintain its own context.

C API

1. PyObject * PyContextVar_New(char *name, PyObject *default): create a ContextVar object. The default argument can be NULL, which means that the variable has no default value.
2. int PyContextVar_Get(PyObject *, PyObject *default_value, PyObject **value): return -1 if an error occurs during the lookup, 0 otherwise. If a value for the context variable is found, it will be set to the value pointer. Otherwise, value will be set to default_value when it is not NULL. If default_value is NULL, value will be set to the default value of the variable, which can be NULL too. value is always a new reference.
3. PyObject * PyContextVar_Set(PyObject *, PyObject *): set the value of the variable in the current context.
4. PyContextVar_Reset(PyObject *, PyObject *): reset the value of the context variable.
5. PyObject * PyContext_New(): create a new empty context.
6. PyObject * PyContext_Copy(PyObject *): return a shallow copy of the passed context object.
7. PyObject * PyContext_CopyCurrent(): get a copy of the current context.
8. int PyContext_Enter(PyObject *) and int PyContext_Exit(PyObject *) make it possible to set and restore the context for the current OS thread. It is required to always restore the previous context:

    PyObject *old_ctx = PyContext_Copy();
    if (old_ctx == NULL) goto error;

    if (PyContext_Enter(new_ctx)) goto error;

    // run some code

    if (PyContext_Exit(old_ctx)) goto error;

Rejected Ideas

Replicating threading.local() interface

Please refer to PEP 550 where this topic is covered in detail[3].

Replacing Token with ContextVar.unset()

The Token API makes it possible to avoid a ContextVar.unset() method, which would be incompatible with the chained-contexts design of PEP 550. Future compatibility with PEP 550 is desired in case there is demand to support context variables in generators and asynchronous generators.

The Token API also offers better usability: the user does not have to special-case the absence of a value. Compare:

    token = cv.set(new_value)
    try:
        # cv.get() is new_value
    finally:
        cv.reset(token)

with:

    _deleted = object()
    old = cv.get(default=_deleted)
    try:
        cv.set(blah)
        # code
    finally:
        if old is _deleted:
            cv.unset()
        else:
            cv.set(old)

Having Token.reset() instead of ContextVar.reset()

Nathaniel Smith suggested implementing the ContextVar.reset() method directly on the Token class, so that instead of:

    token = var.set(value)
    # ...
    var.reset(token)

we would write:

    token = var.set(value)
    # ...
    token.reset()

Having Token.reset() would make it impossible for a user to attempt to reset a variable with a token object created by another variable. This proposal was rejected because ContextVar.reset() makes it clearer to the human reader of the code which variable is being reset.
Making Context objects picklable

Proposed by Antoine Pitrou, this could enable transparent cross-process use of Context objects, so the Offloading execution to other threads example would work with a ProcessPoolExecutor too.

Enabling this is problematic for the following reasons:

1. ContextVar objects do not have __module__ and __qualname__ attributes, making straightforward pickling of Context objects impossible. This is solvable by modifying the API to either auto-detect the module where a context variable is defined, or by adding a new keyword-only "module" parameter to the ContextVar constructor.
2. Not all context variables refer to picklable objects. Making a ContextVar picklable must be an opt-in.

Given the time frame of the Python 3.7 release schedule it was decided to defer this proposal to Python 3.8.

Making Context a MutableMapping

Making the Context class implement the abc.MutableMapping interface would mean that it is possible to set and unset variables using Context[var] = value and del Context[var] operations.

This proposal was deferred to Python 3.8+ because of the following:

1. If in Python 3.8 it is decided that generators should support context variables (see PEP 550 and PEP 568), then Context would be transformed into a chain-map of context variable mappings (as every generator would have its own mapping). That would make mutation operations like Context.__delitem__ confusing, as they would operate only on the topmost mapping of the chain.

2. Having a single way of mutating the context (the ContextVar.set() and ContextVar.reset() methods) makes the API more straightforward. For example, it would be non-obvious why the below code fragment does not work as expected:

    var = ContextVar('var')

    ctx = copy_context()
    ctx[var] = 'value'
    print(ctx[var])  # Prints 'value'

    print(var.get())  # Raises a LookupError

While the following code would work:

    ctx = copy_context()

    def func():
        ctx[var] = 'value'

        # Contrary to the previous example, this would work
        # because 'func()' is running within 'ctx'.
        print(ctx[var])
        print(var.get())

    ctx.run(func)

3. If Context was mutable it would mean that context variables could be mutated separately (or concurrently) from the code that runs within the context. That would be similar to obtaining a reference to a running Python frame object and modifying its f_locals from another OS thread. Having one single way to assign values to context variables makes contexts conceptually simpler and more predictable, while keeping the door open for future performance optimizations.

Having initial values for ContextVars

Nathaniel Smith proposed to have a required initial_value keyword-only argument for the ContextVar constructor.

The main argument against this proposal is that for some types there is simply no sensible "initial value" except None. E.g. consider a web framework that stores the current HTTP request object in a context variable. With the current semantics it is possible to create a context variable without a default value:

    # Framework:
    current_request: ContextVar[Request] = \
        ContextVar('current_request')

    # Later, while handling an HTTP request:
    request: Request = current_request.get()

    # Work with the 'request' object:
    return request.method

Note that in the above example there is no need to check if request is None. It is simply expected that the framework always sets the current_request variable, or it is a bug (in which case current_request.get() would raise a LookupError).
If, however, we had a required initial value, we would have to guard against None values explicitly:

    # Framework:
    current_request: ContextVar[Optional[Request]] = \
        ContextVar('current_request', initial_value=None)

    # Later, while handling an HTTP request:
    request: Optional[Request] = current_request.get()

    # Check if the current request object was set:
    if request is None:
        raise RuntimeError

    # Work with the 'request' object:
    return request.method

Moreover, we can loosely compare context variables to regular Python variables and to threading.local() objects. Both of them raise errors on failed lookups (NameError and AttributeError respectively).

Backwards Compatibility

This proposal preserves 100% backwards compatibility.

Libraries that use threading.local() to store context-related values currently work correctly only for synchronous code. Switching them to use the proposed API will keep their behavior for synchronous code unmodified, but will automatically enable support for asynchronous code.

Examples

Converting code that uses threading.local()

A typical code fragment that uses threading.local() usually looks like the following:

    class PrecisionStorage(threading.local):
        # Subclass threading.local to specify a default value.
        value = 0.0

    precision = PrecisionStorage()

    # To set a new precision:
    precision.value = 0.5

    # To read the current precision:
    print(precision.value)

Such code can be converted to use the contextvars module:

    precision = contextvars.ContextVar('precision', default=0.0)

    # To set a new precision:
    precision.set(0.5)

    # To read the current precision:
    print(precision.get())

Offloading execution to other threads

It is possible to run code in a separate OS thread using a copy of the current thread context:

    executor = ThreadPoolExecutor()
    current_context = contextvars.copy_context()

    executor.submit(current_context.run, some_function)
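A runnable version of the offloading pattern above, assuming Python 3.7+ (the 'precision' variable continues the example from the previous subsection):

    import contextvars
    from concurrent.futures import ThreadPoolExecutor

    precision = contextvars.ContextVar('precision', default=0.0)

    def report():
        # Runs in a worker thread, but inside a copy of the
        # submitting thread's context:
        return precision.get()

    precision.set(0.5)
    ctx = contextvars.copy_context()

    with ThreadPoolExecutor() as executor:
        # Without the context copy, the worker thread would see
        # the default value (0.0) instead of 0.5.
        assert executor.submit(ctx.run, report).result() == 0.5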
Reference Implementation

The reference implementation can be found here[4]. See also issue 32436[5].

Acceptance

PEP 567 was accepted by Guido on Monday, January 22, 2018[6]. The reference implementation was merged on the same day.

References

[1] https://bugs.python.org/issue34762
[2] PEP 550, "Appendix: HAMT Performance Analysis" (https://peps.python.org/pep-0550/#appendix-hamt-performance-analysis)
[3] PEP 550, "Replication of threading.local() interface" (https://peps.python.org/pep-0550/#replication-of-threading-local-interface)
[4] https://github.com/python/cpython/pull/5027
[5] https://bugs.python.org/issue32436
[6] https://mail.python.org/pipermail/python-dev/2018-January/151878.html

Acknowledgments

I thank Guido van Rossum, Nathaniel Smith, Victor Stinner, Elvis Pranskevichus, Alyssa Coghlan, Antoine Pitrou, INADA Naoki, Paul Moore, Eric Snow, Greg Ewing, and many others for their feedback, ideas, edits, criticism, code reviews, and discussions around this PEP.

Copyright

This document has been placed in the public domain.
PEP: 8016 Title: The Steering Council Model Author: Nathaniel J. Smith, Donald Stufft Status: Accepted Type: Informational Topic: Governance Content-Type: text/x-rst Created: 01-Nov-2018 Note This PEP is retained for historical purposes, but the official governance document is now PEP 13. Abstract This PEP proposes a model of Python governance based around a steering council. The council has broad authority, which they seek to exercise as rarely as possible; instead, they use this power to establish standard processes, like those proposed in the other 801x-series PEPs. This follows the general philosophy that it's better to split up large changes into a series of small changes that can be reviewed independently: instead of trying to do everything in one PEP, we focus on providing a minimal-but-solid foundation for further governance decisions. PEP Acceptance PEP 8016 was accepted by a core developer vote described in PEP 8001 on Monday, December 17, 2018. Rationale The main goals of this proposal are: - Be boring: We're not experts in governance, and we don't think Python is a good place to experiment with new and untried governance models. So this proposal sticks to mature, well-known, previously tested processes as much as possible. The high-level approach of a mostly-hands-off council is arguably the most common across large successful F/OSS projects, and low-level details are derived directly from Django's governance. - Be simple: We've attempted to pare things down to the minimum needed to make this workable: the council, the core team (who elect the council), and the process for changing the document. The goal is Minimum Viable Governance. - Be comprehensive: But for the things we need to define, we've tried to make sure to cover all the bases, because we don't want to go through this kind of crisis again. Having a clear and unambiguous set of rules also helps minimize confusion and resentment. - Be flexible and light-weight: We know that it will take time and experimentation to find the best processes for working together. By keeping this document as minimal as possible, we keep maximal flexibility for adjusting things later, while minimizing the need for heavy-weight and anxiety-provoking processes like whole-project votes. A number of details were discussed in this Discourse thread, and then this thread has further discussion. These may be useful to anyone trying to understand the rationale for various minor decisions. Specification The steering council Composition The steering council is a 5-person committee. Mandate The steering council shall work to: - Maintain the quality and stability of the Python language and CPython interpreter, - Make contributing as accessible, inclusive, and sustainable as possible, - Formalize and maintain the relationship between the core team and the PSF, - Establish appropriate decision-making processes for PEPs, - Seek consensus among contributors and the core team before acting in a formal capacity, - Act as a "court of final appeal" for decisions where all other methods have failed. Powers The council has broad authority to make decisions about the project. For example, they can: - Accept or reject PEPs - Enforce or update the project's code of conduct - Work with the PSF to manage any project assets - Delegate parts of their authority to other subcommittees or processes However, they cannot modify this PEP, or affect the membership of the core team, except via the mechanisms specified in this PEP. 
The council should look for ways to use these powers as little as possible. Instead of voting, it's better to seek consensus. Instead of ruling on individual PEPs, it's better to define a standard process for PEP decision making (for example, by accepting one of the other 801x series of PEPs). It's better to establish a Code of Conduct committee than to rule on individual cases. And so on. To use its powers, the council votes. Every council member must either vote or explicitly abstain. Members with conflicts of interest on a particular vote must abstain. Passing requires support from a majority of non-abstaining council members. Whenever possible, the council's deliberations and votes shall be held in public. Electing the council A council election consists of two phases: - Phase 1: Candidates advertise their interest in serving. Candidates must be nominated by a core team member. Self-nominations are allowed. - Phase 2: Each core team member can vote for zero to five of the candidates. Voting is performed anonymously. Candidates are ranked by the total number of votes they receive. If a tie occurs, it may be resolved by mutual agreement among the candidates, or else the winner will be chosen at random. Each phase lasts one to two weeks, at the outgoing council's discretion. For the initial election, both phases will last two weeks. The election process is managed by a returns officer nominated by the outgoing steering council. For the initial election, the returns officer will be nominated by the PSF Executive Director. The council should ideally reflect the diversity of Python contributors and users, and core team members are encouraged to vote accordingly. Term A new council is elected after each feature release. Each council's term runs from when their election results are finalized until the next council's term starts. There are no term limits. Vacancies Council members may resign their position at any time. Whenever there is a vacancy during the regular council term, the council may vote to appoint a replacement to serve out the rest of the term. If a council member drops out of touch and cannot be contacted for a month or longer, then the rest of the council may vote to replace them. Conflicts of interest While we trust council members to act in the best interests of Python rather than themselves or their employers, the mere appearance of any one company dominating Python development could itself be harmful and erode trust. In order to avoid any appearance of conflict of interest, at most 2 members of the council can work for any single employer. In a council election, if 3 of the top 5 vote-getters work for the same employer, then whichever of them ranked lowest is disqualified and the 6th-ranking candidate moves up into 5th place; this is repeated until a valid council is formed. During a council term, if changing circumstances cause this rule to be broken (for instance, due to a council member changing employment), then one or more council members must resign to remedy the issue, and the resulting vacancies can then be filled as normal. Ejecting core team members In exceptional circumstances, it may be necessary to remove someone from the core team against their will. (For example: egregious and ongoing code of conduct violations.) This can be accomplished by a steering council vote, but unlike other steering council votes, this requires at least a two-thirds majority. 
With 5 members voting, this means that a 3:2 vote is insufficient; 4:1 in favor is the minimum required for such a vote to succeed. In addition, this is the one power of the steering council which cannot be delegated, and this power cannot be used while a vote of no confidence is in process. If the ejected core team member is also on the steering council, then they are removed from the steering council as well. Vote of no confidence In exceptional circumstances, the core team may remove a sitting council member, or the entire council, via a vote of no confidence. A no-confidence vote is triggered when a core team member calls for one publicly on an appropriate project communication channel, and another core team member seconds the proposal. The vote lasts for two weeks. Core team members vote for or against. If at least two thirds of voters express a lack of confidence, then the vote succeeds. There are two forms of no-confidence votes: those targeting a single member, and those targeting the council as a whole. The initial call for a no-confidence vote must specify which type is intended. If a single-member vote succeeds, then that member is removed from the council and the resulting vacancy can be handled in the usual way. If a whole-council vote succeeds, the council is dissolved and a new council election is triggered immediately. The core team Role The core team is the group of trusted volunteers who manage Python. They assume many roles required to achieve the project's goals, especially those that require a high level of trust. They make the decisions that shape the future of the project. Core team members are expected to act as role models for the community and custodians of the project, on behalf of the community and all those who rely on Python. They will intervene, where necessary, in online discussions or at official Python events on the rare occasions that a situation arises that requires intervention. They have authority over the Python Project infrastructure, including the Python Project website itself, the Python GitHub organization and repositories, the bug tracker, the mailing lists, IRC channels, etc. Prerogatives Core team members may participate in formal votes, typically to nominate new team members and to elect the steering council. Membership Python core team members demonstrate: - a good grasp of the philosophy of the Python Project - a solid track record of being constructive and helpful - significant contributions to the project's goals, in any form - willingness to dedicate some time to improving Python As the project matures, contributions go beyond code. Here's an incomplete list of areas where contributions may be considered for joining the core team, in no particular order: - Working on community management and outreach - Providing support on the mailing lists and on IRC - Triaging tickets - Writing patches (code, docs, or tests) - Reviewing patches (code, docs, or tests) - Participating in design decisions - Providing expertise in a particular domain (security, i18n, etc.) - Managing the continuous integration infrastructure - Managing the servers (website, tracker, documentation, etc.) - Maintaining related projects (alternative interpreters, core infrastructure like packaging, etc.) - Creating visual designs Core team membership acknowledges sustained and valuable efforts that align well with the philosophy and the goals of the Python project. 
It is granted by receiving at least two-thirds positive votes in a core team vote and no veto by the steering council. Core team members are always looking for promising contributors, teaching them how the project is managed, and submitting their names to the core team's vote when they're ready. There's no time limit on core team membership. However, in order to provide the general public with a reasonable idea of how many people maintain Python, core team members who have stopped contributing are encouraged to declare themselves as "inactive". Those who haven't made any non-trivial contribution in two years may be asked to move themselves to this category, and moved there if they don't respond. To record and honor their contributions, inactive team members will continue to be listed alongside active core team members; and, if they later resume contributing, they can switch back to active status at will. While someone is in inactive status, though, they lose their active privileges like voting or nominating for the steering council, and commit access. The initial active core team members will consist of everyone currently listed in the "Python core" team on GitHub, and the initial inactive members will consist of everyone else who has been a committer in the past. Changing this document Changes to this document require at least a two-thirds majority of votes cast in a core team vote. TODO - Lots of people contributed helpful suggestions and feedback; we should check if they're comfortable being added as co-authors - It looks like Aymeric Augustin wrote the whole Django doc, so presumably holds copyright; maybe we should ask him if he's willing to release it into the public domain so our copyright statement below can be simpler. Acknowledgements Substantial text was copied shamelessly from The Django project's governance document. Copyright Text copied from Django used under their license. The rest of this document has been placed in the public domain.
PEP: 314
Title: Metadata for Python Software Packages 1.1
Author: A.M. Kuchling, Richard Jones
Discussions-To: [email protected]
Status: Superseded
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 12-Apr-2003
Python-Version: 2.5
Post-History: 29-Apr-2003
Replaces: 241
Superseded-By: 345

packaging:core-metadata

Introduction

This PEP describes a mechanism for adding metadata to Python packages. It includes specifics of the field names, and their semantics and usage. This document specifies version 1.1 of the metadata format. Version 1.0 is specified in PEP 241.

Including Metadata in Packages

The Distutils sdist command will extract the metadata fields from the arguments and write them to a file in the generated zipfile or tarball. This file will be named PKG-INFO and will be placed in the top directory of the source distribution (where the README, INSTALL, and other files usually go).

Developers may not provide their own PKG-INFO file. The sdist command will, if it detects an existing PKG-INFO file, terminate with an appropriate error message. This should prevent confusion caused by the PKG-INFO and setup.py files being out of sync.

The PKG-INFO file format is a single set of RFC 822 headers parseable by the rfc822.py module. The field names listed in the following section are used as the header names.

Fields

This section specifies the names and semantics of each of the supported metadata fields.

Fields marked with "(Multiple use)" may be specified multiple times in a single PKG-INFO file. Other fields may only occur once in a PKG-INFO file. Fields marked with "(optional)" are not required to appear in a valid PKG-INFO file; all other fields must be present.

Metadata-Version

Version of the file format; currently "1.0" and "1.1" are the only legal values here. Example:

    Metadata-Version: 1.1

Name

The name of the package. Example:

    Name: BeagleVote

Version

A string containing the package's version number. This field should be parseable by one of the Version classes (StrictVersion or LooseVersion) in the distutils.version module. Example:

    Version: 1.0a2

Platform (multiple use)

A comma-separated list of platform specifications, summarizing the operating systems supported by the package which are not listed in the "Operating System" Trove classifiers. See "Classifier" below. Example:

    Platform: ObscureUnix, RareDOS

Supported-Platform (multiple use)

Binary distributions containing a PKG-INFO file will use the Supported-Platform field in their metadata to specify the OS and CPU for which the binary package was compiled. The semantics of the Supported-Platform field are not specified in this PEP. Example:

    Supported-Platform: RedHat 7.2
    Supported-Platform: i386-win32-2791

Summary

A one-line summary of what the package does. Example:

    Summary: A module for collecting votes from beagles.

Description (optional)

A longer description of the package that can run to several paragraphs. Software that deals with metadata should not assume any maximum size for this field, though people shouldn't include their instruction manual as the description.

The contents of this field can be written using reStructuredText markup[1]. For programs that work with the metadata, supporting markup is optional; programs can also display the contents of the field as-is. This means that authors should be conservative in the markup they use. Example:

    Description: This module collects votes from beagles
                 in order to determine their electoral wishes.
                 Do *not* try to use this module with basset
                 hounds; it makes them grumpy.
Keywords (optional)

A list of additional keywords to be used to assist searching for the package in a larger catalog. Example:

    Keywords: dog puppy voting election

Home-page (optional)

A string containing the URL for the package's home page. Example:

    Home-page: http://www.example.com/~cschultz/bvote/

Download-URL

A string containing the URL from which this version of the package can be downloaded. (This means that the URL can't be something like ".../package-latest.tgz", but instead must be "../package-0.45.tgz".)

Author (optional)

A string containing the author's name at a minimum; additional contact information may be provided. Example:

    Author: C. Schultz, Universal Features Syndicate,
            Los Angeles, CA <[email protected]>

Author-email

A string containing the author's e-mail address. It can contain a name and e-mail address in the legal forms for an RFC 822 'From:' header. It's not optional because cataloging systems can use the e-mail portion of this field as a unique key representing the author. A catalog might provide authors the ability to store their GPG key, personal home page, and other additional metadata about the author, and optionally the ability to associate several e-mail addresses with the same person. Author-related metadata fields are not covered by this PEP. Example:

    Author-email: "C. Schultz" <[email protected]>

License

Text indicating the license covering the package where the license is not a selection from the "License" Trove classifiers. See "Classifier" below. Example:

    License: This software may only be obtained by sending the
             author a postcard, and then the user promises not
             to redistribute it.

Classifier (multiple use)

Each entry is a string giving a single classification value for the package. Classifiers are described in PEP 301. Examples:

    Classifier: Development Status :: 4 - Beta
    Classifier: Environment :: Console (Text Based)

Requires (multiple use)

Each entry contains a string describing some other module or package required by this package.

The format of a requirement string is identical to that of a module or package name usable with the 'import' statement, optionally followed by a version declaration within parentheses.

A version declaration is a series of conditional operators and version numbers, separated by commas. Conditional operators must be one of "<", ">", "<=", ">=", "==", and "!=". Version numbers must be in the format accepted by the distutils.version.StrictVersion class: two or three dot-separated numeric components, with an optional "pre-release" tag on the end consisting of the letter 'a' or 'b' followed by a number. Example version numbers are "1.0", "2.3a2", "1.3.99".

Any number of conditional operators can be specified, e.g. the string ">1.0, !=1.3.4, <2.0" is a legal version declaration.

All of the following are possible requirement strings: "rfc822", "zlib (>=1.1.4)", "zope".

There's no canonical list of what strings should be used; the Python community is left to choose its own standards. Example:

    Requires: re
    Requires: sys
    Requires: zlib
    Requires: xml.parsers.expat (>1.0)
    Requires: psycopg
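The version-number grammar above is exactly what distutils.version.StrictVersion accepts, so a tool consuming PKG-INFO could lean on that class to evaluate version declarations. A minimal sketch of such a checker (the satisfies() helper is illustrative and not part of this PEP; note that distutils.version was deprecated and removed from the standard library many years later):

    import operator
    from distutils.version import StrictVersion

    _OPS = {'<': operator.lt, '>': operator.gt, '<=': operator.le,
            '>=': operator.ge, '==': operator.eq, '!=': operator.ne}

    def satisfies(version, declaration):
        # Check a version against a declaration such as ">1.0, !=1.3.4, <2.0".
        candidate = StrictVersion(version)
        for clause in declaration.split(','):
            clause = clause.strip()
            # Split each clause into its operator and version parts.
            op = ''.join(ch for ch in clause if ch in '<>=!')
            if not _OPS[op](candidate, StrictVersion(clause[len(op):].strip())):
                return False
        return True

    assert satisfies('1.5', '>1.0, !=1.3.4, <2.0')
    assert not satisfies('2.3a2', '>=2.3')   # pre-releases sort before releases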
Provides (multiple use)

Each entry contains a string describing a package or module that will be provided by this package once it is installed. These strings should match the ones used in Requirements fields. A version declaration may be supplied (without a comparison operator); the package's version number will be implied if none is specified. Example:

    Provides: xml
    Provides: xml.utils
    Provides: xml.utils.iso8601
    Provides: xml.dom
    Provides: xmltools (1.3)

Obsoletes (multiple use)

Each entry contains a string describing a package or module that this package renders obsolete, meaning that the two packages should not be installed at the same time. Version declarations can be supplied.

The most common use of this field will be in case a package name changes, e.g. Gorgon 2.3 gets subsumed into Torqued Python 1.0. When you install Torqued Python, the Gorgon package should be removed. Example:

    Obsoletes: Gorgon

Summary of Differences From PEP 241

- Metadata-Version is now 1.1.
- Added the Classifiers field from PEP 301.
- The License and Platform fields should now only be used if the platform or license can't be handled by an appropriate Classifier value.
- Added fields: Download-URL, Requires, Provides, Obsoletes.

Open issues

None.

Acknowledgements

None.

References

[1] reStructuredText http://docutils.sourceforge.net/

Copyright

This document has been placed in the public domain.
PEP: 367
Title: New Super
Version: $Revision$
Last-Modified: $Date$
Author: Calvin Spealman <[email protected]>, Tim Delaney <[email protected]>
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Apr-2007
Python-Version: 2.6
Post-History: 28-Apr-2007, 29-Apr-2007, 29-Apr-2007, 14-May-2007

Numbering Note

This PEP has been renumbered to PEP 3135. The text below is the last version submitted under the old number.

Abstract

This PEP proposes syntactic sugar for use of the super type to automatically construct instances of the super type binding to the class that a method was defined in, and the instance (or class object for classmethods) that the method is currently acting upon.

The premise of the new super usage suggested is as follows:

    super.foo(1, 2)

to replace the old:

    super(Foo, self).foo(1, 2)

and the current __builtin__.super be aliased to __builtin__.__super__ (with __builtin__.super to be removed in Python 3.0).

It is further proposed that assignment to super become a SyntaxError, similar to the behaviour of None.

Rationale

The current usage of super requires an explicit passing of both the class and instance it must operate from, requiring a breaking of the DRY (Don't Repeat Yourself) rule. This hinders any change in class name, and is often considered a wart by many.

Specification

Within the specification section, some special terminology will be used to distinguish similar and closely related concepts. "super type" will refer to the actual builtin type named "super". A "super instance" is simply an instance of the super type, which is associated with a class and possibly with an instance of that class.

Because the new super semantics are not backwards compatible with Python 2.5, the new semantics will require a __future__ import:

    from __future__ import new_super

The current __builtin__.super will be aliased to __builtin__.__super__. This will occur regardless of whether the new super semantics are active. It is not possible to simply rename __builtin__.super, as that would affect modules that do not use the new super semantics. In Python 3.0 it is proposed that the name __builtin__.super will be removed.

Replacing the old usage of super, calls to the next class in the MRO (method resolution order) can be made without explicitly creating a super instance (although doing so will still be supported via __super__). Every function will have an implicit local named super. This name behaves identically to a normal local, including use by inner functions via a cell, with the following exceptions:

1. Assigning to the name super will raise a SyntaxError at compile time;
2. Calling a static method or normal function that accesses the name super will raise a TypeError at runtime.

Every function that uses the name super, or has an inner function that uses the name super, will include a preamble that performs the equivalent of:

    super = __builtin__.__super__(<class>, <instance>)

where <class> is the class that the method was defined in, and <instance> is the first parameter of the method (normally self for instance methods, and cls for class methods). For static methods and normal functions, <class> will be None, resulting in a TypeError being raised during the preamble.

Note: The relationship between super and __super__ is similar to that between import and __import__.

Much of this was discussed in the thread of the python-dev list, "Fixing super anyone?"[1].
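For comparison, the semantics Python eventually shipped (this PEP was renumbered to PEP 3135 and implemented in Python 3) use a zero-argument call rather than an implicit local name. A minimal sketch in modern Python 3, with illustrative class names:

    class Base:
        def greet(self):
            return 'Base'

    class Child(Base):
        def greet(self):
            # The compiler provides the class and instance implicitly;
            # equivalent to super(Child, self).greet() in the old style.
            return 'Child/' + super().greet()

    assert Child().greet() == 'Child/Base'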
Open Issues

Determining the class object to use

The exact mechanism for associating the method with the defining class is not specified in this PEP, and should be chosen for maximum performance. For CPython, it is suggested that the class instance be held in a C-level variable on the function object which is bound to one of NULL (not part of a class), Py_None (static method) or a class object (instance or class method).

Should super actually become a keyword?

With this proposal, super would become a keyword to the same extent that None is a keyword. It is possible that further restricting the super name may simplify implementation, however some are against the actual keyword-ization of super. The simplest solution is often the correct solution and the simplest solution may well not be adding additional keywords to the language when they are not needed. Still, it may solve other open issues.

Closed Issues

super used with __call__ attributes

It was considered that instantiating super instances the classic way might be a problem, because calling such an instance would look up the __call__ attribute and thus try to perform an automatic super lookup to the next class in the MRO. However, this was found to be false, because calling an object only looks up the __call__ method directly on the object's type. The following example shows this in action.

    class A(object):
        def __call__(self):
            return '__call__'

        def __getattribute__(self, attr):
            if attr == '__call__':
                return lambda: '__getattribute__'

    a = A()
    assert a() == '__call__'
    assert a.__call__() == '__getattribute__'

In any case, with the renaming of __builtin__.super to __builtin__.__super__ this issue goes away entirely.

Reference Implementation

It is impossible to implement the above specification entirely in Python. This reference implementation has the following differences to the specification:

1. New super semantics are implemented using bytecode hacking.
2. Assignment to super is not a SyntaxError. Also see point #4.
3. Classes must either use the metaclass autosuper_meta or inherit from the base class autosuper to acquire the new super semantics.
4. super is not an implicit local variable. In particular, for inner functions to be able to use the super instance, there must be an assignment of the form super = super in the method.

The reference implementation assumes that it is being run on Python 2.5+.
    #!/usr/bin/env python
    #
    # autosuper.py

    from array import array
    import dis
    import new
    import types
    import __builtin__
    __builtin__.__super__ = __builtin__.super
    del __builtin__.super

    # We need these for modifying bytecode
    from opcode import opmap, HAVE_ARGUMENT, EXTENDED_ARG

    LOAD_GLOBAL = opmap['LOAD_GLOBAL']
    LOAD_NAME = opmap['LOAD_NAME']
    LOAD_CONST = opmap['LOAD_CONST']
    LOAD_FAST = opmap['LOAD_FAST']
    LOAD_ATTR = opmap['LOAD_ATTR']
    STORE_FAST = opmap['STORE_FAST']
    LOAD_DEREF = opmap['LOAD_DEREF']
    STORE_DEREF = opmap['STORE_DEREF']
    CALL_FUNCTION = opmap['CALL_FUNCTION']
    STORE_GLOBAL = opmap['STORE_GLOBAL']
    DUP_TOP = opmap['DUP_TOP']
    POP_TOP = opmap['POP_TOP']
    NOP = opmap['NOP']
    JUMP_FORWARD = opmap['JUMP_FORWARD']

    ABSOLUTE_TARGET = dis.hasjabs

    def _oparg(code, opcode_pos):
        return code[opcode_pos+1] + (code[opcode_pos+2] << 8)

    def _bind_autosuper(func, cls):
        co = func.func_code
        name = func.func_name
        newcode = array('B', co.co_code)
        codelen = len(newcode)
        newconsts = list(co.co_consts)
        newvarnames = list(co.co_varnames)

        # Check if the global 'super' keyword is already present
        try:
            sn_pos = list(co.co_names).index('super')
        except ValueError:
            sn_pos = None

        # Check if the varname 'super' keyword is already present
        try:
            sv_pos = newvarnames.index('super')
        except ValueError:
            sv_pos = None

        # Check if the cellvar 'super' keyword is already present
        try:
            sc_pos = list(co.co_cellvars).index('super')
        except ValueError:
            sc_pos = None

        # If 'super' isn't used anywhere in the function, we don't have
        # anything to do
        if sn_pos is None and sv_pos is None and sc_pos is None:
            return func

        c_pos = None
        s_pos = None
        n_pos = None

        # Check if the 'cls_name' and 'super' objects are already in the
        # constants
        for pos, o in enumerate(newconsts):
            if o is cls:
                c_pos = pos

            if o is __super__:
                s_pos = pos

            if o == name:
                n_pos = pos

        # Add in any missing objects to constants and varnames
        if c_pos is None:
            c_pos = len(newconsts)
            newconsts.append(cls)

        if n_pos is None:
            n_pos = len(newconsts)
            newconsts.append(name)

        if s_pos is None:
            s_pos = len(newconsts)
            newconsts.append(__super__)

        if sv_pos is None:
            sv_pos = len(newvarnames)
            newvarnames.append('super')

        # This goes at the start of the function. It is:
        #
        #   super = __super__(cls, self)
        #
        # If 'super' is a cell variable, we store to both the
        # local and cell variables (i.e. STORE_FAST and STORE_DEREF).
        preamble = [
            LOAD_CONST, s_pos & 0xFF, s_pos >> 8,
            LOAD_CONST, c_pos & 0xFF, c_pos >> 8,
            LOAD_FAST, 0, 0,
            CALL_FUNCTION, 2, 0,
        ]

        if sc_pos is None:
            # 'super' is not a cell variable - we can just use the local
            # variable
            preamble += [
                STORE_FAST, sv_pos & 0xFF, sv_pos >> 8,
            ]
        else:
            # If 'super' is a cell variable, we need to handle LOAD_DEREF.
            preamble += [
                DUP_TOP,
                STORE_FAST, sv_pos & 0xFF, sv_pos >> 8,
                STORE_DEREF, sc_pos & 0xFF, sc_pos >> 8,
            ]

        preamble = array('B', preamble)

        # Bytecode for loading the local 'super' variable.
        load_super = array('B', [
            LOAD_FAST, sv_pos & 0xFF, sv_pos >> 8,
        ])

        preamble_len = len(preamble)
        need_preamble = False
        i = 0

        while i < codelen:
            opcode = newcode[i]
            need_load = False
            remove_store = False

            if opcode == EXTENDED_ARG:
                raise TypeError(
                    "Cannot use 'super' in function with EXTENDED_ARG opcode")

            # If the opcode is an absolute target it needs to be adjusted
            # to take into account the preamble.
            elif opcode in ABSOLUTE_TARGET:
                oparg = _oparg(newcode, i) + preamble_len
                newcode[i+1] = oparg & 0xFF
                newcode[i+2] = oparg >> 8

            # If LOAD_GLOBAL(super) or LOAD_NAME(super) then we want to
            # change it into LOAD_FAST(super)
            elif ((opcode == LOAD_GLOBAL or opcode == LOAD_NAME)
                    and _oparg(newcode, i) == sn_pos):
                need_preamble = need_load = True

            # If LOAD_FAST(super) then we just need to add the preamble
            elif opcode == LOAD_FAST and _oparg(newcode, i) == sv_pos:
                need_preamble = need_load = True

            # If LOAD_DEREF(super) then we change it into LOAD_FAST(super)
            # because it's slightly faster.
            elif opcode == LOAD_DEREF and _oparg(newcode, i) == sc_pos:
                need_preamble = need_load = True

            if need_load:
                newcode[i:i+3] = load_super

            i += 1
            if opcode >= HAVE_ARGUMENT:
                i += 2

        # No changes needed - get out.
        if not need_preamble:
            return func

        # Our preamble will have 3 things on the stack
        co_stacksize = max(3, co.co_stacksize)

        # Conceptually, our preamble is on the `def` line.
        co_lnotab = array('B', co.co_lnotab)
        if co_lnotab:
            co_lnotab[0] += preamble_len
        co_lnotab = co_lnotab.tostring()

        # Our code consists of the preamble and the modified code.
        codestr = (preamble + newcode).tostring()

        codeobj = new.code(co.co_argcount, len(newvarnames), co_stacksize,
                           co.co_flags, codestr, tuple(newconsts),
                           co.co_names, tuple(newvarnames), co.co_filename,
                           co.co_name, co.co_firstlineno, co_lnotab,
                           co.co_freevars, co.co_cellvars)

        func.func_code = codeobj
        func.func_class = cls
        return func

    class autosuper_meta(type):
        def __init__(cls, name, bases, clsdict):
            UnboundMethodType = types.UnboundMethodType

            for v in vars(cls):
                o = getattr(cls, v)
                if isinstance(o, UnboundMethodType):
                    _bind_autosuper(o.im_func, cls)

    class autosuper(object):
        __metaclass__ = autosuper_meta

    if __name__ == '__main__':
        class A(autosuper):
            def f(self):
                return 'A'

        class B(A):
            def f(self):
                return 'B' + super.f()

        class C(A):
            def f(self):
                def inner():
                    return 'C' + super.f()

                # Needed to put 'super' into a cell
                super = super
                return inner()

        class D(B, C):
            def f(self, arg=None):
                var = None
                return 'D' + super.f()

        assert D().f() == 'DBCA'

Disassembly of B.f and C.f reveals the different preambles used when super is simply a local variable compared to when it is used by an inner function.

    >>> dis.dis(B.f)

    214           0 LOAD_CONST               4 (<type 'super'>)
                  3 LOAD_CONST               2 (<class '__main__.B'>)
                  6 LOAD_FAST                0 (self)
                  9 CALL_FUNCTION            2
                 12 STORE_FAST               1 (super)

    215          15 LOAD_CONST               1 ('B')
                 18 LOAD_FAST                1 (super)
                 21 LOAD_ATTR                1 (f)
                 24 CALL_FUNCTION            0
                 27 BINARY_ADD
                 28 RETURN_VALUE

    >>> dis.dis(C.f)

    218           0 LOAD_CONST               4 (<type 'super'>)
                  3 LOAD_CONST               2 (<class '__main__.C'>)
                  6 LOAD_FAST                0 (self)
                  9 CALL_FUNCTION            2
                 12 DUP_TOP
                 13 STORE_FAST               1 (super)
                 16 STORE_DEREF              0 (super)

    219          19 LOAD_CLOSURE             0 (super)
                 22 LOAD_CONST               1 (<code object inner at 00C160A0, file "autosuper.py", line 219>)
                 25 MAKE_CLOSURE             0
                 28 STORE_FAST               2 (inner)

    223          31 LOAD_FAST                1 (super)
                 34 STORE_DEREF              0 (super)

    224          37 LOAD_FAST                2 (inner)
                 40 CALL_FUNCTION            0
                 43 RETURN_VALUE

Note that in the final implementation, the preamble would not be part of the bytecode of the method, but would occur immediately following unpacking of parameters.

Alternative Proposals

No Changes

Although it's always attractive to just keep things how they are, people have sought a change in the usage of super calling for some time, and for good reason, all mentioned previously:

- Decoupling from the class name (which might not even be bound to the right class anymore!)
- Simpler looking, cleaner super calls would be better

Dynamic attribute on super type

The proposal adds a dynamic attribute lookup to the super type, which will automatically determine the proper class and instance parameters. Each super attribute lookup identifies these parameters and performs the super lookup on the instance, as the current super implementation does with the explicit invocation of a super instance upon a class and instance.

This proposal relies on sys._getframe(), which is not appropriate for anything except a prototype implementation.

super(__this_class__, self)

This is nearly an anti-proposal, as it basically relies on the acceptance of the __this_class__ PEP, which proposes a special name that would always be bound to the class within which it is used. If that is accepted, __this_class__ could simply be used instead of the class' name explicitly, solving the name binding issues[2].

self.__super__.foo(*args)

The __super__ attribute is mentioned in this PEP in several places, and could be a candidate for the complete solution, actually using it explicitly instead of any super usage directly. However, double-underscore names are usually an internal detail, and are best kept out of everyday code.

super(self, *args) or __super__(self, *args)

This solution only solves the problem of the type indication, does not handle differently named super methods, and is explicit about the name of the instance. It is less flexible, as it cannot be applied to other method names in cases where that is needed. One use case this fails is where a base-class has a factory classmethod and a subclass has two factory classmethods, both of which need to properly make super calls to the one in the base-class.

super.foo(self, *args)

This variation actually eliminates the problems with locating the proper instance, and if any of the alternatives were pushed into the spotlight, I would want it to be this one.

super or super()

This proposal leaves no room for different names, signatures, or application to other classes, or instances. A way to allow some similar use alongside the normal proposal would be favorable, encouraging good design of multiple inheritance trees and compatible methods.

super(*p, **kw)

There has been the proposal that directly calling super(*p, **kw) would be equivalent to calling the method on the super object with the same name as the method currently being executed, i.e. the following two methods would be equivalent:

    def f(self, *p, **kw):
        super.f(*p, **kw)

    def f(self, *p, **kw):
        super(*p, **kw)

There is strong sentiment for and against this, but implementation and style concerns are obvious. Guido has suggested that this should be excluded from this PEP on the principle of KISS (Keep It Simple Stupid).

History

29-Apr-2007
- Changed title from "Super As A Keyword" to "New Super".
- Updated much of the language and added a terminology section for clarification in confusing places.
- Added reference implementation and history sections.

06-May-2007
- Updated by Tim Delaney to reflect discussions on the python-3000 and python-dev mailing lists.

References

Copyright

This document has been placed in the public domain.

[1] Fixing super anyone?
(https://mail.python.org/pipermail/python-3000/2007-April/006667.html) [2] PEP 3130: Access to Module/Class/Function Currently Being Defined (this) (https://mail.python.org/pipermail/python-ideas/2007-April/000542.html)
PEP: 569
Title: Python 3.8 Release Schedule
Author: Łukasz Langa <[email protected]>
Status: Final
Type: Informational
Topic: Release
Created: 27-Jan-2018
Python-Version: 3.8

Abstract

This document describes the development and release schedule for Python 3.8. The schedule primarily concerns itself with PEP-sized items.

Release Manager and Crew

- 3.8 Release Manager: Łukasz Langa
- Windows installers: Steve Dower
- Mac installers: Ned Deily
- Documentation: Julien Palard

3.8 Lifespan

3.8 will receive bugfix updates approximately every 2 months for approximately 18 months. Some time after the release of 3.9.0 final, the ninth and final 3.8 bugfix update was released. After that, security updates (source only) were released for 5 years until the release of Python 3.13.0 final.

As of 2024-10-07, 3.8 has reached the end-of-life phase of its release cycle. 3.8.20 was the final security release. The codebase for 3.8 is now frozen and no further updates will be provided nor issues of any kind will be accepted on the bug tracker.

Release Schedule

3.8.0 schedule

- 3.8 development begins: Monday, 2018-01-29
- 3.8.0 alpha 1: Sunday, 2019-02-03
- 3.8.0 alpha 2: Monday, 2019-02-25
- 3.8.0 alpha 3: Monday, 2019-03-25
- 3.8.0 alpha 4: Monday, 2019-05-06
- 3.8.0 beta 1: Tuesday, 2019-06-04 (No new features beyond this point.)
- 3.8.0 beta 2: Thursday, 2019-07-04
- 3.8.0 beta 3: Monday, 2019-07-29
- 3.8.0 beta 4: Friday, 2019-08-30
- 3.8.0 candidate 1: Tuesday, 2019-10-01
- 3.8.0 final: Monday, 2019-10-14

Bugfix releases

- 3.8.1rc1: Tuesday, 2019-12-10
- 3.8.1: Wednesday, 2019-12-18
- 3.8.2rc1: Monday, 2020-02-10
- 3.8.2rc2: Monday, 2020-02-17
- 3.8.2: Monday, 2020-02-24
- 3.8.3rc1: Wednesday, 2020-04-29
- 3.8.3: Wednesday, 2020-05-13
- 3.8.4rc1: Tuesday, 2020-06-30
- 3.8.4: Monday, 2020-07-13
- 3.8.5: Monday, 2020-07-20 (security hotfix)
- 3.8.6rc1: Tuesday, 2020-09-08
- 3.8.6: Thursday, 2020-09-24
- 3.8.7rc1: Monday, 2020-12-07
- 3.8.7: Monday, 2020-12-21
- 3.8.8rc1: Tuesday, 2021-02-16
- 3.8.8: Friday, 2021-02-19
- 3.8.9: Friday, 2021-04-02 (security hotfix)
- 3.8.10: Monday, 2021-05-03 (final regular bugfix release with binary installers)

Source-only security fix releases

Provided irregularly on an "as-needed" basis until October 7th 2024.
- 3.8.11: Monday, 2021-06-28
- 3.8.12: Monday, 2021-08-30
- 3.8.13: Wednesday, 2022-03-16
- 3.8.14: Tuesday, 2022-09-06
- 3.8.15: Tuesday, 2022-10-11
- 3.8.16: Tuesday, 2022-12-06
- 3.8.17: Tuesday, 2023-06-06
- 3.8.18: Thursday, 2023-08-24
- 3.8.19: Tuesday, 2024-03-19
- 3.8.20: Friday, 2024-09-06 (final security release)

Features for 3.8

Some of the notable features of Python 3.8 include:

- PEP 570, Positional-only arguments
- PEP 572, Assignment Expressions
- PEP 574, Pickle protocol 5 with out-of-band data
- PEP 578, Runtime audit hooks
- PEP 587, Python Initialization Configuration
- PEP 590, Vectorcall: a fast calling protocol for CPython
- Typing-related: PEP 591 (Final qualifier), PEP 586 (Literal types), and PEP 589 (TypedDict)
- Parallel filesystem cache for compiled bytecode
- Debug builds share ABI as release builds
- f-strings support a handy = specifier for debugging
- continue is now legal in finally: blocks
- on Windows, the default asyncio event loop is now ProactorEventLoop
- on macOS, the spawn start method is now used by default in multiprocessing
- multiprocessing can now use shared memory segments to avoid pickling costs between processes
- typed_ast is merged back to CPython
- LOAD_GLOBAL is now 40% faster
- pickle now uses Protocol 4 by default, improving performance

There are many other interesting changes, please consult the "What's New" page in the documentation for a full list.
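A few of the language-level features in the list above fit in a short snippet. The following sketch is illustrative only (requires Python 3.8+; the variable names are made up):

    # PEP 572: assignment expressions (the "walrus" operator).
    data = [1, 2, 3, 4]
    if (n := len(data)) > 3:
        print(f"list is too long ({n} elements)")

    # PEP 570: positional-only parameters ('x' cannot be passed by keyword).
    def square(x, /):
        return x * x

    # f-string '=' specifier for debugging: prints "n=4".
    print(f"{n=}")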
Copyright

This document has been placed in the public domain.
PEP: 423 Title: Naming conventions and recipes related to packaging Version: $Revision$ Last-Modified: $Date$ Author: Benoit Bryon <[email protected]> Discussions-To: [email protected] Status: Deferred Type: Informational Topic: Packaging Content-Type: text/x-rst Created: 24-May-2012 Post-History: Abstract This document deals with: - names of Python projects, - names of Python packages or modules being distributed, - namespace packages. It provides guidelines and recipes for distribution authors: - new projects should follow the guidelines below. - existing projects should be aware of these guidelines and can follow specific recipes for existing projects. PEP Deferral Further consideration of this PEP has been deferred at least until after PEP 426 (package metadata 2.0) and related updates have been resolved. Terminology Reference is packaging terminology in Python documentation. Relationship with other PEPs - 8#package-and-module-names deals with code style guide, including names of Python packages and modules. It covers syntax of package/modules names. - PEP 345 deals with packaging metadata, and defines name argument of the packaging.core.setup() function. - PEP 420 deals with namespace packages. It brings support of namespace packages to Python core. Before, namespaces packages were implemented by external libraries. - PEP 3108 deals with transition between Python 2.x and Python 3.x applied to standard library: some modules to be deleted, some to be renamed. It points out that naming conventions matter and is an example of transition plan. Overview Here is a summarized list of guidelines you should follow to choose names: - understand and respect namespace ownership. - if your project is related to another project or community: - search for conventions in main project's documentation, because projects should organize community contributions. - follow specific project or related community conventions, if any. - if there is no convention, follow a standard naming pattern. - make sure your project name is unique, i.e. avoid duplicates: - use top-level namespace for ownership, - check for name availability, - register names with PyPI. - make sure distributed packages and modules names are unique, unless you explicitly want to distribute alternatives to existing packages or modules. Using the same value for package/module name and project name is the recommended way to achieve this. - distribute only one package or module at a time, unless you know what you are doing. It makes it possible to apply the "use a single name" rule, and thus make names consistent. - make it easy to discover and remember your project: - use as much memorable names as possible, - use as much meaningful names as possible, - use other packaging metadata. - avoid deep nesting. Flat things are easier to use and remember than nested ones: - one or two namespace levels are recommended, because they are almost always enough. - even if not recommended, three levels are, de facto, a common case. - in most cases, you should not need more than three levels. - follow PEP 8 for syntax of package and module names. - if you followed specific conventions, or if your project is intended to receive contributions from the community, organize community contributions. - if still in doubt, ask. If in doubt, ask If you feel unsure after reading this document, ask Python community on IRC or on a mailing list. Top-level namespace relates to code ownership This helps avoid clashes between project names. Ownership could be: - an individual. 
Example: gp.fileupload is owned and maintained by Gael Pasgrimaud.

- an organization. Examples:
  - zest.releaser is owned and maintained by Zest Software.
  - Django is owned and maintained by the Django Software Foundation.
- a group or community. Example: sphinx is maintained by developers of the Sphinx project, not only by its author, Georg Brandl.
- a group or community related to another package. Example: collective.recaptcha is owned by its author, David Glick, Groundwire. But the "collective" namespace is owned by the Plone community.

Respect ownership

Understand the purpose of a namespace before you use it. Don't plug into a namespace you don't own, unless explicitly authorized. If in doubt, ask. As an example, don't plug into the "django.contrib" namespace, because it is managed by Django's core contributors. Exceptions can be defined by project authors. See Organize community contributions below. Also, this rule applies to non-Python projects. As an example, don't use "apache" as a top-level namespace: "Apache" is the name of an existing project (in the case of "Apache", it is also a trademark).

Private (including closed-source) projects use a namespace

... because private projects are owned by somebody. So apply the ownership rule. For internal/customer projects, use your company name as the namespace. This rule applies to closed-source projects. As an example, if you are creating a "climbing" project for the "Python Sport" company, use the name "pythonsport.climbing", even if it is closed source.

Individual projects use a namespace

... because they are owned by individuals. So apply the ownership rule. There is no shame in releasing a project as open source even if it has an "internal" or "individual" name. If the project comes to a point where the author wants to change ownership (i.e. the project no longer belongs to an individual), keep in mind that it is easy to rename the project.

Community-owned projects can avoid namespace packages

If your project is generic enough (i.e. it is not a contrib to another product or framework), you can avoid namespace packages. The base condition is generally that your project is owned by a group (i.e. the development team) which is dedicated to this project. Only use a "shared" namespace if you really intend the code to be community owned. As an example, the sphinx project belongs to the Sphinx development team. There is no need to have some "sphinx" namespace package with only one "sphinx.sphinx" project inside.

If in doubt, use an individual/organization namespace

If your project is really experimental, the best choice is to use an individual or organization namespace:

- it allows projects to be released early.
- it won't block a name if the project is abandoned.
- it doesn't block future changes.

When a project becomes mature and there is no reason to keep individual ownership, it remains possible to rename the project.

Use a single name

Distribute only one package (or only one module) per project, and use the package (or module) name as the project name.

- It avoids possible confusion between the project name and the distributed package or module name.
- It makes the name consistent.
- It is explicit: when one sees the project name, one can guess the package/module name, and vice versa.
- It also limits implicit clashes between package/module names. By using a single name, when you register a project name with PyPI, you also perform a basic package/module name availability verification.

As an example, pipeline, python-pipeline and django-pipeline all distribute a package or module called "pipeline".
So installing two of them leads to errors. This issue wouldn't have occurred if these distributions had used a single name.

Yes:

- Package name: "kheops.pyramid", i.e. import kheops.pyramid
- Project name: "kheops.pyramid", i.e. pip install kheops.pyramid

No:

- Package name: "kheops"
- Project name: "KheopsPyramid"

Note: for historical reasons, PyPI contains many distributions where project and distributed package/module names differ.

Multiple packages/modules should be rare

Technically, Python distributions can provide multiple packages and/or modules. See the setup script reference for details. Some distributions actually do. As an example, setuptools and distribute both declare "pkg_resources", "easy_install" and "site" modules in addition to their respective "setuptools" and "distribute" packages. Consider this use case as exceptional. In most cases, you don't need this feature. So a distribution should provide only one package or module at a time.

Distinct names should be rare

A notable exception to the "use a single name" rule is when you explicitly need distinct names. As an example, the Pillow project provides an alternative to the original PIL distribution. Both projects distribute a "PIL" package. Consider this use case as exceptional. In most cases, you don't need this feature. So a distributed package name should be equal to the project name.

Follow PEP 8 for syntax of package and module names

PEP 8 applies to names of Python packages and modules. If you use a single name, PEP 8 also applies to project names. The exceptions are namespace packages, where dots are required in the project name.

Pick memorable names

One important thing about a project name is that it be memorable. As an example, celery is not a meaningful name. At first, it is not obvious that it deals with message queuing. But it is memorable, partly because it can be used to feed a RabbitMQ server.

Pick meaningful names

Ask yourself "how would I describe in one sentence what this name is for?", and then "could anyone have guessed that by looking at the name?". As an example, DateUtils is a meaningful name. It is obvious that it deals with utilities for dates. When you are using namespaces, try to make each part meaningful.

Use packaging metadata

Consider project names as unique identifiers on PyPI:

- it is important that these identifiers remain human-readable.
- it is even better when these identifiers are meaningful.
- but the primary purpose of identifiers is not to classify or describe projects.

Classifiers and keywords metadata are made for categorization of distributions. Summary and description metadata are meant to describe the project. As an example, there is a "Framework :: Twisted" classifier. Even though the names of Twisted-related projects are quite heterogeneous (they don't follow a particular pattern), we can still get the list by filtering on that classifier. In order to organize community contributions, conventions about names and namespaces matter, but conventions about metadata matter even more. As an example, we can find Plone portlets in many places:

- plone.portlet.*
- collective.portlet.*
- collective.portlets.*
- collective.*.portlets
- some vendor-related projects such as "quintagroup.portlet.cumulus"
- and even projects where the "portlet" pattern doesn't appear in the name.

Even though the Plone community has conventions, using the name to categorize distributions is inappropriate. It's impossible to get the full list of distributions that provide portlets for Plone by filtering on names.
But it would be possible if all these distributions used the "Framework :: Plone" classifier and the "portlet" keyword.

Avoid deep nesting

The Zen of Python (PEP 20) says "Flat is better than nested".

Two levels is almost always enough

Don't define everything in deeply nested hierarchies: you will end up with projects and packages like "pythonsport.common.maps.forest". This type of name is both verbose and cumbersome (e.g. if you have many imports from the package). Furthermore, big hierarchies tend to break down over time as the boundaries between different packages blur. The consensus is that two levels of nesting are preferred. For example, we have plone.principalsource instead of plone.source.principal or something like that. The name is shorter, the package structure is simpler, and there would be very little to gain from having three levels of nesting here. It would be impractical to try to put all "core Plone" sources (a source is a kind of vocabulary) into the plone.source.* namespace, in part because some sources are part of other packages, and in part because sources already exist in other places. Had we made a new namespace, it would have been inconsistently used from the start.

Yes: "pyranha"
Yes: "pythonsport.climbing"
Yes: "pythonsport.forestmap"
No: "pythonsport.maps.forest"

Use only one level for ownership

Don't use 3 levels to set individual/organization ownership in a community namespace. As an example, let's consider:

- you are plugging into a community namespace, such as "collective".
- and you want to add a more restrictive "ownership" level, to avoid clashes inside the community.

In such a case, you'd better use the most restrictive ownership level as the first level. As an example, where "collective" is a major community namespace that "gergovie" belongs to, and "vercingetorix" is the name of "gergovie"'s author:

No: "collective.vercingetorix.gergovie"
Yes: "vercingetorix.gergovie"

Don't use namespace levels for categorization

Use packaging metadata instead.

Don't use more than 3 levels

Technically, you can create deeply nested hierarchies. However, in most cases, you shouldn't need them. Note: even communities where namespaces are standard don't use more than 3 levels.

Conventions for communities or related projects

Follow community or related project conventions, if any

Projects or related communities can have specific conventions, which may differ from those explained in this document. In such a case, they should declare their specific conventions in documentation. So, if your project belongs to another project or to a community, first look for specific conventions in the main project's documentation. If there are no specific conventions, follow the ones declared in this document. As an example, the Plone community releases community contributions in the "collective" namespace package. It differs from the standard namespace for contributions proposed here. But since it is documented, there is no ambiguity and you should follow this specific convention.

Use standard pattern for community contributions

When no specific rule is defined, use the ${MAINPROJECT}contrib.${PROJECT} pattern to store community contributions for any product or framework, where:

- ${MAINPROJECT} is the name of the related project. "pyranha" in the example below.
- ${PROJECT} is the name of your project. "giantteeth" in the example below.

As an example:

- you are the author of the "pyranha" project. You own the "pyranha" namespace.
- you didn't define specific naming conventions for community contributions.
- a third-party developer wants to publish a "giantteeth" project related to your "pyranha" project in a community namespace. They should then publish it as "pyranhacontrib.giantteeth".

It is the simplest way to organize community contributions.

Note: why the ${MAINPROJECT}contrib.* pattern?

- ${MAINPROJECT}c.* is not explicit enough. As examples, "zc" belongs to "Zope Corporation" whereas "z3c" belongs to the "Zope 3 community".
- ${MAINPROJECT}community is too long.
- ${MAINPROJECT}community conflicts with existing namespaces such as "iccommunity" or "PyCommunity".
- ${MAINPROJECT}.contrib.* is inside the ${MAINPROJECT} namespace, i.e. it is owned by the ${MAINPROJECT} authors. It breaks the "top-level namespace relates to code ownership" rule.
- ${MAINPROJECT}.contrib.* breaks the "avoid deep nesting" rule.
- names where ${MAINPROJECT} doesn't appear are not explicit enough, i.e. nobody can guess they are related to ${MAINPROJECT}. As an example, it is not obvious that "collective.*" belongs to the Plone community.
- {$DIST}contrib.* looks like the existing sphinxcontrib-* packages. But sphinxcontrib-* is actually about Sphinx contrib, so this is not a real conflict... In fact, the "contrib" suffix was inspired by "sphinxcontrib".

Organize community contributions

This is the counterpart of the "follow community conventions" and "standard pattern for contributions" rules. Actions:

- Choose a naming convention for community contributions.
- If it is not the default, then document it.
  - if you use the default convention, then this document should be enough. Don't repeat it. You may reference it.
  - else, tell users about custom conventions in the project's "contribute" or "create modules" documentation.
- Also recommend the use of additional metadata, such as classifiers and keywords.

About convention choices:

- New projects should choose the default contrib pattern.
- Existing projects with community contributions should start with their custom conventions. Then they can promote migrations.

This means that existing community conventions don't have to be changed. But, at least, they should be explicitly documented. Example: "pyranha" is your project name and package name. Tell contributors that:

- pyranha-related distributions should use the "pyranha" keyword.
- pyranha-related distributions providing templates should also use the "templates" keyword.
- community contributions should be released under the "pyranhacontrib" namespace (i.e. use the "pyranhacontrib.*" pattern).

Register names with PyPI

PyPI is the central place for distributions in the Python community. So, it is also the place to register project and package names. See Registering with the Package Index for details.

Recipes

The following recipes will help you follow the guidelines and conventions above.

How to check for name availability?

Before you choose a project name, make sure it hasn't already been registered in the following locations:

- PyPI
- that's all. PyPI is the only official place.

As an example, you could also check various other locations, such as popular code hosting services, but keep in mind that PyPI is the only place where you can register names in the Python community. That's why it is important that you register names with PyPI. Also make sure the names of distributed packages or modules haven't already been registered:

- in the Python Standard Library.
- inside projects at PyPI. There is currently no helper for that. Notice that the more projects follow the "use a single name" rule, the easier this verification becomes.
- you may ask the community.
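The PyPI part of this check is easy to automate. The following sketch is mine, not part of the original recipes: it assumes today's PyPI JSON API (https://pypi.org/pypi/<name>/json), which returns HTTP 404 for unregistered names and did not exist when this PEP was written:

    import urllib.error
    import urllib.request

    def pypi_name_is_available(name):
        """Return True if *name* is not yet registered on PyPI."""
        url = "https://pypi.org/pypi/%s/json" % name
        try:
            urllib.request.urlopen(url)
        except urllib.error.HTTPError as exc:
            # 404 means the project name is unregistered; any other
            # HTTP error is unexpected, so treat the name as taken.
            return exc.code == 404
        return False

    print(pypi_name_is_available("kheops.pyramid"))

Checking the Python Standard Library and the packages distributed by existing projects still has to be done by hand, as noted above.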
The "use a single name" rule also helps you avoid clashes with package names: if a project name is available, then the package name is likely to be available too.

How to rename a project?

Renaming a project is possible, but keep in mind that it will cause some confusion. So, pay particular attention to the README and documentation, so that users understand what happened.

1. First of all, do not remove legacy distributions from PyPI, because some users may be using them.
2. Copy the legacy project, then change names (project and package/module). Pay attention to, at least:
   - packaging files,
   - the folder name that contains source files,
   - documentation, including the README,
   - import statements in code.
3. Assign the Obsoletes-Dist metadata to the new distribution in the setup.cfg file. See PEP 345 about Obsoletes-Dist and the setup.cfg specification.
4. Release a new version of the renamed project, then publish it.
5. Edit the legacy project:
   - add a dependency on the new project,
   - drop everything except packaging stuff,
   - add the "Development Status :: 7 - Inactive" classifier in the setup script,
   - publish a new release.

(A sketch of what steps 3 and 5 can look like appears a bit further below.) So, users of the legacy package:

- can continue using the legacy distributions at a deprecated version,
- can upgrade to the last version of the legacy distribution, which is empty...
- ... and automatically download the new distribution as a dependency of the legacy one.

Users who discover the legacy project see that it is inactive.

Improved handling of renamed projects on PyPI

If many projects follow the renaming recipe above, then many legacy distributions will have the following characteristics:

- the "Development Status :: 7 - Inactive" classifier.
- the latest version is empty, except for packaging stuff.
- the latest version "redirects" to another distribution. E.g. it has a single dependency on the renamed project.
- it is referenced as Obsoletes-Dist in a newer distribution.

So it will be possible to detect renamed projects and improve readability on PyPI, so that users can focus on active distributions. But this feature is not required now. There is no urgency. It won't be covered in this document.

How to apply naming guidelines on existing projects?

There is no obligation for existing projects to be renamed. The choice is left to project authors and maintainers for obvious reasons. However, project authors are invited to:

- at least, document their current naming choices.
- then plan and promote migration.
- optionally, actually rename the existing project or distributed packages/modules.

Document current naming choices

The important thing, at first, is that you document your current choices:

- Ask yourself "why did I choose the current name?", then document it.
- If there are differences with the guidelines provided in this document, you should tell your users.
- If possible, create issues in the project's bugtracker, at least for the record. Then you are free to resolve them later, or maybe mark them as "wontfix".

Projects that are meant to receive contributions from the community should also organize community contributions.

Promote migrations

Every Python developer should migrate whenever possible, or promote the migrations in their respective communities. Apply these guidelines to your own projects, then the community will see that it is safe. In particular, "leaders" such as authors of popular projects are influential; they have power and, thus, responsibility over communities. Apply these guidelines to popular projects, then communities will adopt the conventions too.
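Here is the sketch announced in the renaming recipe above. It is my illustration, not part of the PEP: the project names are invented, the setup.cfg fragment assumes the distutils2-era format this PEP refers to, and install_requires is a setuptools keyword standing in for "add a dependency on the new project":

    # setup.cfg of the *renamed* project (step 3): declare that it
    # obsoletes the legacy distribution.
    [metadata]
    name = vercingetorix.gergovie
    obsoletes-dist = gergovie

    # setup.py of the *legacy* project (step 5): an empty shim that
    # only depends on the new project and is marked inactive.
    from setuptools import setup

    setup(
        name='gergovie',
        version='1.1',
        install_requires=['vercingetorix.gergovie'],
        classifiers=['Development Status :: 7 - Inactive'],
    )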
Projects should promote migrations when they release a new (major) version, particularly if this version introduces support for Python 3.x, the new standard library packaging, or namespace packages.

Opportunity

As of this writing, Python 3.3 is being developed:

- many projects are not Python 3.x compatible, including "big" products and frameworks. This means that many projects will have to carry out a migration to support Python 3.x.
- packaging (a.k.a. distutils2) is on the starting blocks. When it is released, projects will be invited to migrate and use the new packaging.
- PEP 420 brings official support for namespace packages to Python.

This means that most active projects should be about to migrate in the next year(s) to support Python 3.x, the new packaging, or the new namespace packages. Such an opportunity is unique and won't come again soon! So let's introduce and promote naming conventions as soon as possible (i.e. now).

References

Additional background:

- Martin Aspeli's article about names. Some parts of this document are quotes from this article.
- the in-development official packaging documentation.
- The Hitchhiker's Guide to Packaging, which has an empty placeholder for a "naming specification".

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 203 Title: Augmented Assignments Author: Thomas Wouters <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 13-Jul-2000 Python-Version: 2.0 Post-History: 14-Aug-2000 Introduction This PEP describes the augmented assignment proposal for Python 2.0. This PEP tracks the status and ownership of this feature, slated for introduction in Python 2.0. It contains a description of the feature and outlines changes necessary to support the feature. This PEP summarizes discussions held in mailing list forums[1], and provides URLs for further information where appropriate. The CVS revision history of this file contains the definitive historical record. Proposed Semantics The proposed patch that adds augmented assignment to Python introduces the following new operators: += -= *= /= %= **= <<= >>= &= ^= |= They implement the same operator as their normal binary form, except that the operation is done in-place when the left-hand side object supports it, and that the left-hand side is only evaluated once. They truly behave as augmented assignment, in that they perform all of the normal load and store operations, in addition to the binary operation they are intended to do. So, given the expression: x += y The object x is loaded, then y is added to it, and the resulting object is stored back in the original place. The precise action performed on the two arguments depends on the type of x, and possibly of y. The idea behind augmented assignment in Python is that it isn't just an easier way to write the common practice of storing the result of a binary operation in its left-hand operand, but also a way for the left-hand operand in question to know that it should operate on itself, rather than creating a modified copy of itself. To make this possible, a number of new hooks are added to Python classes and C extension types, which are called when the object in question is used as the left hand side of an augmented assignment operation. If the class or type does not implement the in-place hooks, the normal hooks for the particular binary operation are used. So, given an instance object x, the expression: x += y tries to call x.__iadd__(y), which is the in-place variant of __add__ . If __iadd__ is not present, x.__add__(y) is attempted, and finally y.__radd__(x) if __add__ is missing too. There is no right-hand-side variant of __iadd__, because that would require for y to know how to in-place modify x, which is unsafe to say the least. The __iadd__ hook should behave similar to __add__, returning the result of the operation (which could be self) which is to be assigned to the variable x. For C extension types, the hooks are members of the PyNumberMethods and PySequenceMethods structures. Some special semantics apply to make the use of these methods, and the mixing of Python instance objects and C types, as unsurprising as possible. In the generic case of x <augop> y (or a similar case using the PyNumber_InPlace API functions) the principal object being operated on is x. This differs from normal binary operations, where x and y could be considered co-operating, because unlike in binary operations, the operands in an in-place operation cannot be swapped. However, in-place operations do fall back to normal binary operations when in-place modification is not supported, resulting in the following rules: - If the left-hand object (x) is an instance object, and it has a __coerce__ method, call that function with y as the argument. 
If coercion succeeds, and the resulting left-hand object is a different object than x, stop processing it as in-place and call the appropriate function for the normal binary operation, with the coerced x and y as arguments. The result of the operation is whatever that function returns. If coercion does not yield a different object for x, or x does not define a __coerce__ method, and x has the appropriate __ihook__ for this operation, call that method with y as the argument, and the result of the operation is whatever that method returns.

- Otherwise, if the left-hand object is not an instance object, but its type does define the in-place function for this operation, call that function with x and y as the arguments, and the result of the operation is whatever that function returns. Note that no coercion on either x or y is done in this case, and it's perfectly valid for a C type to receive an instance object as the second argument; that is something that cannot happen with normal binary operations.
- Otherwise, process it exactly as a normal binary operation (not in-place), including argument coercion. In short, if either argument is an instance object, resolve the operation through __coerce__, __hook__ and __rhook__. Otherwise, both objects are C types, and they are coerced and passed to the appropriate function.
- If no way to process the operation can be found, raise a TypeError with an error message specific to the operation.
- Some special casing exists to account for the case of + and *, which have a special meaning for sequences: for +, sequence concatenation, no coercion whatsoever is done if a C type defines sq_concat or sq_inplace_concat. For *, sequence repeating, y is converted to a C integer before calling either sq_inplace_repeat or sq_repeat. This is done even if y is an instance, though not if x is an instance.

The in-place function should always return a new reference, either to the old x object if the operation was indeed performed in-place, or to a new object.

Rationale

There are two main reasons for adding this feature to Python: simplicity of expression, and support for in-place operations. The end result is a tradeoff between simplicity of syntax and simplicity of expression; like most new features, augmented assignment doesn't add anything that was previously impossible. It merely makes these things easier to do. Adding augmented assignment will make Python's syntax more complex. Instead of a single assignment operation, there are now twelve assignment operations, eleven of which also perform a binary operation. However, these eleven new forms of assignment are easy to understand as the coupling between assignment and the binary operation, and they require no large conceptual leap to understand. Furthermore, languages that do have augmented assignment have shown that they are a popular, much used feature. Expressions of the form:

    <x> = <x> <operator> <y>

are common enough in those languages to make the extra syntax worthwhile, and Python does not have significantly fewer of those expressions. Quite the opposite, in fact, since in Python you can also concatenate lists with a binary operator, something that is done quite frequently. Writing the above expression as:

    <x> <operator>= <y>

is both more readable and less error prone, because it is instantly obvious to the reader that it is <x> that is being changed, and not <x> that is being replaced by something almost, but not quite, entirely unlike <x>.
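To make the in-place/rebinding distinction concrete, here is a short illustration using present-day Python lists (my example, not part of the original PEP):

    x = [1, 2]
    alias = x
    x += [3]          # in-place: list.__iadd__ mutates x and returns it
    assert alias == [1, 2, 3] and alias is x

    y = [1, 2]
    alias = y
    y = y + [3]       # binary +: builds a new list, rebinding only y
    assert alias == [1, 2] and alias is not y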
The new in-place operations are especially useful for matrix calculation and other applications that require large objects. In order to deal efficiently with the available program memory, such packages cannot blindly use the current binary operations. Because these operations always create a new object, adding a single item to an existing (large) object would result in copying the entire object (which may cause the application to run out of memory), adding the single item, and then possibly deleting the original object, depending on reference count. To work around this problem, the packages currently have to use methods or functions to modify an object in-place, which is definitely less readable than an augmented assignment expression. Augmented assignment won't solve all the problems for these packages, since some operations cannot be expressed in the limited set of binary operators to start with, but it is a start. PEP 211 is looking at adding new operators.

New methods

The proposed implementation adds the following 11 possible hooks which Python classes can implement to overload the augmented assignment operations:

    __iadd__
    __isub__
    __imul__
    __idiv__
    __imod__
    __ipow__
    __ilshift__
    __irshift__
    __iand__
    __ixor__
    __ior__

The i in __iadd__ stands for in-place. For C extension types, the following struct members are added. To PyNumberMethods:

    binaryfunc nb_inplace_add;
    binaryfunc nb_inplace_subtract;
    binaryfunc nb_inplace_multiply;
    binaryfunc nb_inplace_divide;
    binaryfunc nb_inplace_remainder;
    binaryfunc nb_inplace_power;
    binaryfunc nb_inplace_lshift;
    binaryfunc nb_inplace_rshift;
    binaryfunc nb_inplace_and;
    binaryfunc nb_inplace_xor;
    binaryfunc nb_inplace_or;

To PySequenceMethods:

    binaryfunc sq_inplace_concat;
    intargfunc sq_inplace_repeat;

In order to keep binary compatibility, the tp_flags TypeObject member is used to determine whether the TypeObject in question has allocated room for these slots. Until a clean break in binary compatibility is made (which may or may not happen before 2.0) code that wants to use one of the new struct members must first check that they are available with the PyType_HasFeature() macro:

    if (PyType_HasFeature(x->ob_type, Py_TPFLAGS_HAVE_INPLACE_OPS) &&
        x->ob_type->tp_as_number && x->ob_type->tp_as_number->nb_inplace_add) {
        /* ... */

This check must be made even before testing the method slots for NULL values! The macro only tests whether the slots are available, not whether they are filled with methods or not.

Implementation

The current implementation of augmented assignment[2] adds, in addition to the methods and slots already covered, 13 new bytecodes and 13 new API functions. The API functions are simply in-place versions of the current binary-operation API functions:

    PyNumber_InPlaceAdd(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceSubtract(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceMultiply(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceDivide(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceRemainder(PyObject *o1, PyObject *o2);
    PyNumber_InPlacePower(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceLshift(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceRshift(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceAnd(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceXor(PyObject *o1, PyObject *o2);
    PyNumber_InPlaceOr(PyObject *o1, PyObject *o2);
    PySequence_InPlaceConcat(PyObject *o1, PyObject *o2);
    PySequence_InPlaceRepeat(PyObject *o, int count);

They call either the Python class hooks (if either of the objects is a Python class instance) or the C type's number or sequence methods.
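For readers who want to see the class-side hook in action, here is a small sketch in present-day Python (mine, not the PEP's):

    class Counter:
        def __init__(self, value=0):
            self.value = value

        def __iadd__(self, other):
            # In-place hook: mutate self, then return the object that
            # should be bound to the left-hand name (here: self).
            self.value += other
            return self

    c = Counter()
    before = id(c)
    c += 5                     # resolved via c.__iadd__(5)
    assert c.value == 5 and id(c) == before

If Counter did not define __iadd__, the same statement would fall back to Counter.__add__ and then to the right-hand operand's __radd__, exactly as described above.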
The new bytecodes are:

    INPLACE_ADD
    INPLACE_SUBTRACT
    INPLACE_MULTIPLY
    INPLACE_DIVIDE
    INPLACE_REMAINDER
    INPLACE_POWER
    INPLACE_LEFTSHIFT
    INPLACE_RIGHTSHIFT
    INPLACE_AND
    INPLACE_XOR
    INPLACE_OR
    ROT_FOUR
    DUP_TOPX

The INPLACE_* bytecodes mirror the BINARY_* bytecodes, except that they are implemented as calls to the InPlace API functions. The other two bytecodes are utility bytecodes: ROT_FOUR behaves like ROT_THREE except that the four topmost stack items are rotated. DUP_TOPX is a bytecode that takes a single argument, which should be an integer between 1 and 5 (inclusive), giving the number of items to duplicate in one block. Given a stack like this (where the right side of the list is the top of the stack):

    [1, 2, 3, 4, 5]

DUP_TOPX 3 would duplicate the top 3 items, resulting in this stack:

    [1, 2, 3, 4, 5, 3, 4, 5]

DUP_TOPX with an argument of 1 is the same as DUP_TOP. The limit of 5 is purely an implementation limit. The implementation of augmented assignment requires only DUP_TOPX with an argument of 2 and 3, and could do without this new opcode at the cost of a fair number of DUP_TOP and ROT_*.

Open Issues

The PyNumber_InPlace API is only a subset of the normal PyNumber API: only those functions that are required to support the augmented assignment syntax are included. If other in-place API functions are needed, they can be added later. The DUP_TOPX bytecode is a convenience bytecode, and is not actually necessary. It should be considered whether this bytecode is worth having. There seems to be no other possible use for this bytecode at this time.

Copyright

This document has been placed in the public domain.

References

[1] http://www.python.org/pipermail/python-list/2000-June/059556.html
[2] http://sourceforge.net/patch?func=detailpatch&patch_id=100699&group_id=5470
PEP: 603 Title: Adding a frozenmap type to collections Version: $Revision$ Last-Modified: $Date$ Author: Yury Selivanov <[email protected]> Discussions-To: https://discuss.python.org/t/pep-603-adding-a-frozenmap-type-to-collections/2318/ Status: Draft Type: Standards Track Content-Type: text/x-rst Created: 12-Sep-2019 Post-History: 12-Sep-2019

Abstract

A persistent data structure is defined as a data structure that preserves the previous version of the data when the data is modified. Such data structures are effectively immutable, as operations on them do not update the structure in-place, but instead always yield a new updated structure (see [1] for more details.) This PEP proposes to add a new fully persistent and immutable mapping type called frozenmap to the collections module. The bulk of frozenmap's reference implementation is already used in CPython to implement the contextvars module.

Rationale

Python has two immutable collection types: tuple and frozenset. These types can be used to represent immutable lists and sets. However, a way to represent immutable mappings does not yet exist, and this PEP proposes a frozenmap to implement an immutable mapping. The proposed frozenmap type:

- implements the collections.abc.Mapping protocol,
- supports pickling, and
- provides an API for efficient creation of "modified" versions.

The following use cases illustrate why an immutable mapping is desirable:

- Immutable mappings are hashable, which allows their use as dictionary keys or set elements. This hashable property permits functions decorated with @functools.lru_cache() to accept immutable mappings as arguments. Unlike an immutable mapping, passing a plain dict to such a function results in an error.
- Immutable mappings can hold complex state. Since immutable mappings can be copied by reference, transactional mutation of state can be efficiently implemented.
- Immutable mappings can be used to safely share dictionaries across thread and asynchronous task boundaries. The immutability makes it easier to reason about threads and asynchronous tasks.

Lastly, CPython [2] already contains the main portion of the C code required for the frozenmap implementation. The C code already exists to implement the contextvars module (see PEP 567 for more details.) Exposing this C code via a public collection type drastically increases the number of users of the code. This leads to increased code quality by discovering bugs and improving performance, which without a frozenmap collection would be very challenging, because most programs use the contextvars module indirectly.

Specification

A new public immutable type frozenmap is added to the collections module.

Construction

frozenmap implements a dict-like construction API:

- frozenmap() creates a new empty immutable mapping;
- frozenmap(**kwargs) creates a mapping from **kwargs, e.g. frozenmap(x=10, y=0, z=-1)
- frozenmap(collection) creates a mapping from the passed collection object. The passed collection object can be:
  - a dict,
  - another frozenmap,
  - an object with an items() method that is expected to return a series of key/value tuples, or
  - an iterable of key/value tuples.

Data Access

frozenmap implements the collections.abc.Mapping protocol.
Therefore, getters, membership checks, and iteration work the same way that they would for a dict:

    m = frozenmap(foo='bar')

    assert m['foo'] == 'bar'
    assert m.get('foo') == 'bar'
    assert 'foo' in m

    assert 'baz' not in m
    assert m.get('baz', 'missing') == 'missing'

    assert m == m
    assert m != frozenmap()   # m is not equal to an empty frozenmap

    assert len(m) == 1
    # etc.

Mutation

frozenmap instances are immutable. That said, it is possible to efficiently produce mutated copies of the immutable instance. The complexity of mutation operations is O(log N) and the resulting frozenmap copies often consume very little additional memory due to the use of structural sharing (read [3] for more details.)

frozenmap.including(key, value)

The method creates a new frozenmap copy with a new key / value pair:

    m = frozenmap(foo=1)
    m2 = m.including('bar', 100)

    print(m)   # will print frozenmap({'foo': 1})
    print(m2)  # will print frozenmap({'foo': 1, 'bar': 100})

frozenmap.excluding(key)

The method produces a copy of the frozenmap which does not include a deleted key:

    m = frozenmap(foo=1, bar=100)
    m2 = m.excluding('foo')

    print(m)   # will print frozenmap({'foo': 1, 'bar': 100})
    print(m2)  # will print frozenmap({'bar': 100})

    m3 = m.excluding('spam')  # will throw a KeyError('spam')

frozenmap.union(mapping=None, **kw)

The method produces a copy of the frozenmap and adds or modifies multiple key/values for the created copy. The signature of the method matches the signature of the frozenmap constructor:

    m = frozenmap(foo=1)

    m2 = m.union({'spam': 'ham'})
    print(m2)  # will print frozenmap({'foo': 1, 'spam': 'ham'})

    m3 = m.union(foo=100, y=2)
    print(m3)  # will print frozenmap({'foo': 100, 'y': 2})

    print(m)   # will print frozenmap({'foo': 1})

Calling the union() method to add/replace N keys is more efficient than calling the including() method N times.

frozenmap.mutating()

The method allows efficient copying of a frozenmap instance with multiple modifications applied. This method is especially useful when the frozenmap in question contains thousands of key/value pairs and there's a need to update many of them in a performance-critical section of the code. The frozenmap.mutating() method returns a mutable dict-like copy of the frozenmap object: an instance of collections.FrozenMapCopy. The FrozenMapCopy objects:

- are copy-on-write views of the data of the frozenmap instances they were created from;
- are mutable, although any mutations on them do not affect the frozenmap instances they were created from;
- can be passed to the frozenmap constructor; creating a frozenmap from a FrozenMapCopy object is an O(1) operation;
- have O(log N) complexity for get/set operations; creating them is an O(1) operation;
- have a FrozenMapCopy.close() method that prevents any further access/mutation of the data;
- can be used as a context manager.

The example below illustrates how mutating() can be used with a context manager; note that all work on the copy happens inside the "with" block, which closes the copy on exit:

    numbers = frozenmap((i, i ** 2) for i in range(1_000_000))

    with numbers.mutating() as copy:
        for i in numbers:
            if not (numbers[i] % 997):
                del copy[i]

        numbers_without_997_multiples = frozenmap(copy)

        # at this point, *numbers* still has 1_000_000 key/values, and
        # *numbers_without_997_multiples* is a copy of *numbers* without
        # values that are multiples of 997.

        for i in numbers:
            if not (numbers[i] % 593):
                del copy[i]

        numbers_without_593_multiples = frozenmap(copy)

        print(copy[10])  # will print 100.

    print(copy[10])  # This will throw a ValueError as *copy*
                     # has been closed when the "with" block
                     # was executed.
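The Rationale above mentions the @functools.lru_cache() use case. Here is a short sketch of it (my example, not the PEP's) using the immutables package, which the PEP names as its reference implementation; the class there is called Map rather than frozenmap:

    import functools
    from immutables import Map

    @functools.lru_cache()
    def render(options):
        # *options* must be hashable for lru_cache to accept it.
        return sorted(options.items())

    opts = Map(color='red', width=3)
    print(render(opts))        # works: Map is hashable
    # render(dict(opts))       # would raise TypeError: unhashable type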
Iteration

As frozenmap implements the standard collections.abc.Mapping protocol, all expected methods of iteration are supported:

    assert list(m) == ['foo']
    assert list(m.items()) == [('foo', 'bar')]
    assert list(m.keys()) == ['foo']
    assert list(m.values()) == ['bar']

Iteration in frozenmap, unlike in dict, does not preserve the insertion order.

Hashing

frozenmap instances can be hashable just like tuple objects:

    hash(frozenmap(foo='bar'))  # works
    hash(frozenmap(foo=[]))     # will throw an error

Typing

It is possible to use the standard typing notation for frozenmaps:

    m: frozenmap[str, int] = frozenmap()

Implementation

The proposed frozenmap immutable type uses a Hash Array Mapped Trie (HAMT) data structure. Functional programming languages, like Clojure, use HAMT to efficiently implement immutable hash tables, vectors, and sets.

HAMT

The key design contract of HAMT is the guarantee of a predictable value when given the hash of a key. For a pair of key and value, the hash of the key can be used to determine the location of the value in the hash map tree. Immutable mappings implemented with HAMT have O(log N) performance for set() and get() operations. This efficiency is possible because mutation operations only affect one branch of the tree, making it possible to reuse non-mutated branches, and, therefore, avoiding copying of unmodified data. Read more about HAMT in [4]. The CPython implementation [5] has a fairly detailed description of the algorithm as well.

Performance

[Figure 1 omitted: creation benchmark of frozenmap versus dict.copy() across mapping sizes.]

The above chart demonstrates that:

- frozenmap implemented with HAMT displays near O(1) performance for all benchmarked dictionary sizes.
- dict.copy() becomes less efficient when using around 100-200 items.

[Figure 2 omitted: lookup cost of dict versus a HAMT-based immutable mapping.]

Figure 2 compares the lookup costs of dict versus a HAMT-based immutable mapping. HAMT lookup time is ~30% slower than Python dict lookups on average. This performance difference exists since traversing a shallow tree is less efficient than lookup in a flat continuous array. Further to that, quoting [6]: "[using HAMT] means that in practice while insertions, deletions, and lookups into a persistent hash array mapped trie have a computational complexity of O(log n), for most applications they are effectively constant time, as it would require an extremely large number of entries to make any operation take more than a dozen steps."

Design Considerations

Why "frozenmap" and not "FrozenMap"

The lower-case "frozenmap" resonates well with the frozenset built-in as well as with types like collections.defaultdict.

Why "frozenmap" and not "frozendict"

"Dict" has a very specific meaning in Python:

- a dict is a concrete implementation of abc.MutableMapping with O(1) get and set operations (frozenmap has O(log N) complexity);
- Python dicts preserve insertion order.

The proposed frozenmap does not have these properties. Instead, frozenmap has an O(log N) cost for set/get operations, and it only implements the abc.Mapping protocol.

Implementation

The full implementation of the proposed frozenmap type is available at [7]. The package includes C and pure Python implementations of the type. See also the HAMT collection implementation as part of the CPython project tree here: [8].

References

Acknowledgments

I thank Carol Willing, Łukasz Langa, Larry Hastings, and Guido van Rossum for their feedback, ideas, edits, and discussions around this PEP.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
[1] https://en.wikipedia.org/wiki/Persistent_data_structure [2] https://github.com/python/cpython/blob/3.8/Python/hamt.c [3] https://en.wikipedia.org/wiki/Persistent_data_structure#Trees [4] https://en.wikipedia.org/wiki/Hash_array_mapped_trie#cite_note-bagwell-1 [5] https://github.com/python/cpython/blob/3.8/Python/hamt.c [6] https://en.wikipedia.org/wiki/Persistent_data_structure#Trees [7] https://github.com/MagicStack/immutables [8] https://github.com/python/cpython/blob/3.8/Python/hamt.c
PEP: 285 Title: Adding a bool type Author: Guido van Rossum <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 08-Mar-2002 Python-Version: 2.3 Post-History: 08-Mar-2002, 30-Mar-2002, 03-Apr-2002 Abstract This PEP proposes the introduction of a new built-in type, bool, with two constants, False and True. The bool type would be a straightforward subtype (in C) of the int type, and the values False and True would behave like 0 and 1 in most respects (for example, False==0 and True==1 would be true) except repr() and str(). All built-in operations that conceptually return a Boolean result will be changed to return False or True instead of 0 or 1; for example, comparisons, the "not" operator, and predicates like isinstance(). Review I've collected enough feedback to last me a lifetime, so I declare the review period officially OVER. I had Chinese food today; my fortune cookie said "Strong and bitter words indicate a weak cause." It reminded me of some of the posts against this PEP... :-) Anyway, here are my BDFL pronouncements. (Executive summary: I'm not changing a thing; all variants are rejected.) 1) Should this PEP be accepted? => Yes. There have been many arguments against the PEP. Many of them were based on misunderstandings. I've tried to clarify some of the most common misunderstandings below in the main text of the PEP. The only issue that weighs at all for me is the tendency of newbies to write "if x == True" where "if x" would suffice. More about that below too. I think this is not a sufficient reason to reject the PEP. 2) Should str(True) return "True" or "1"? "1" might reduce backwards compatibility problems, but looks strange. (repr(True) would always return "True".) => "True". Almost all reviewers agree with this. 3) Should the constants be called 'True' and 'False' (similar to None) or 'true' and 'false' (as in C++, Java and C99)? => True and False. Most reviewers agree that consistency within Python is more important than consistency with other languages. 4) Should we strive to eliminate non-Boolean operations on bools in the future, through suitable warnings, so that for example True+1 would eventually (in Python 3000) be illegal? => No. There's a small but vocal minority that would prefer to see "textbook" bools that don't support arithmetic operations at all, but most reviewers agree with me that bools should always allow arithmetic operations. 5) Should operator.truth(x) return an int or a bool? => bool. Tim Peters believes it should return an int, but almost all other reviewers agree that it should return a bool. My rationale: operator.truth() exists to force a Boolean context on its argument (it calls the C API PyObject_IsTrue()). Whether the outcome is reported as int or bool is secondary; if bool exists there's no reason not to use it. (Under the PEP, operator.truth() now becomes an alias for bool(); that's fine.) 6) Should bool inherit from int? => Yes. In an ideal world, bool might be better implemented as a separate integer type that knows how to perform mixed-mode arithmetic. However, inheriting bool from int eases the implementation enormously (in part since all C code that calls PyInt_Check() will continue to work -- this returns true for subclasses of int). Also, I believe this is right in terms of substitutability: code that requires an int can be fed a bool and it will behave the same as 0 or 1. 
Code that requires a bool may not work when it is given an int; for example, 3 & 4 is 0, but both 3 and 4 are true when considered as truth values. 7) Should the name 'bool' be changed? => No. Some reviewers have argued for boolean instead of bool, because this would be easier to understand (novices may have heard of Boolean algebra but may not make the connection with bool) or because they hate abbreviations. My take: Python uses abbreviations judiciously (like 'def', 'int', 'dict') and I don't think these are a burden to understanding. To a newbie, it doesn't matter whether it's called a waffle or a bool; it's a new word, and they learn quickly what it means. One reviewer has argued to make the name 'truth'. I find this an unattractive name, and would actually prefer to reserve this term (in documentation) for the more abstract concept of truth values that already exists in Python. For example: "when a container is interpreted as a truth value, an empty container is considered false and a non-empty one is considered true." 8) Should we strive to require that Boolean operations (like "if", "and", "not") have a bool as an argument in the future, so that for example "if []:" would become illegal and would have to be written as "if bool([]):" ??? => No!!! Some people believe that this is how a language with a textbook Boolean type should behave. Because it was brought up, others have worried that I might agree with this position. Let me make my position on this quite clear. This is not part of the PEP's motivation and I don't intend to make this change. (See also the section "Clarification" below.) Rationale Most languages eventually grow a Boolean type; even C99 (the new and improved C standard, not yet widely adopted) has one. Many programmers apparently feel the need for a Boolean type; most Python documentation contains a bit of an apology for the absence of a Boolean type. I've seen lots of modules that defined constants "False=0" and "True=1" (or similar) at the top and used those. The problem with this is that everybody does it differently. For example, should you use "FALSE", "false", "False", "F" or even "f"? And should false be the value zero or None, or perhaps a truth value of a different type that will print as "true" or "false"? Adding a standard bool type to the language resolves those issues. Some external libraries (like databases and RPC packages) need to be able to distinguish between Boolean and integral values, and while it's usually possible to craft a solution, it would be easier if the language offered a standard Boolean type. This also applies to Jython: some Java classes have separately overloaded methods or constructors for int and boolean arguments. The bool type can be used to select the boolean variant. (The same is apparently the case for some COM interfaces.) The standard bool type can also serve as a way to force a value to be interpreted as a Boolean, which can be used to normalize Boolean values. When a Boolean value needs to be normalized to one of two values, bool(x) is much clearer than "not not x" and much more concise than if x: return 1 else: return 0 Here are some arguments derived from teaching Python. When showing people comparison operators etc. in the interactive shell, I think this is a bit ugly: >>> a = 13 >>> b = 12 >>> a > b 1 >>> If this was: >>> a > b True >>> it would require a millisecond less thinking each time a 0 or 1 was printed. 
There's also the issue (which I've seen baffling even experienced Pythonistas who had been away from the language for a while) that if you see:

    >>> cmp(a, b)
    1
    >>> cmp(a, a)
    0
    >>>

you might be tempted to believe that cmp() also returned a truth value, whereas in reality it can return three different values (-1, 0, 1). If ints were not (normally) used to represent Boolean results, this would stand out much more clearly as something completely different.

Specification

The following Python code specifies most of the properties of the new type:

    class bool(int):

        def __new__(cls, val=0):
            # This constructor always returns an existing instance
            if val:
                return True
            else:
                return False

        def __repr__(self):
            if self:
                return "True"
            else:
                return "False"

        __str__ = __repr__

        def __and__(self, other):
            if isinstance(other, bool):
                return bool(int(self) & int(other))
            else:
                return int.__and__(self, other)

        __rand__ = __and__

        def __or__(self, other):
            if isinstance(other, bool):
                return bool(int(self) | int(other))
            else:
                return int.__or__(self, other)

        __ror__ = __or__

        def __xor__(self, other):
            if isinstance(other, bool):
                return bool(int(self) ^ int(other))
            else:
                return int.__xor__(self, other)

        __rxor__ = __xor__

    # Bootstrap truth values through sheer willpower
    False = int.__new__(bool, 0)
    True = int.__new__(bool, 1)

The values False and True will be singletons, like None. Because the type has two values, perhaps these should be called "doubletons"? The real implementation will not allow other instances of bool to be created. True and False will properly round-trip through pickling and marshalling; for example pickle.loads(pickle.dumps(True)) will return True, and so will marshal.loads(marshal.dumps(True)). All built-in operations that are defined to return a Boolean result will be changed to return False or True instead of 0 or 1. In particular, this affects comparisons (<, <=, ==, !=, >, >=, is, is not, in, not in), the unary operator 'not', the built-in functions callable(), hasattr(), isinstance() and issubclass(), the dict method has_key(), the string and unicode methods endswith(), isalnum(), isalpha(), isdigit(), islower(), isspace(), istitle(), isupper(), and startswith(), the unicode methods isdecimal() and isnumeric(), and the 'closed' attribute of file objects. The predicates in the operator module are also changed to return a bool, including operator.truth(). Because bool inherits from int, True+1 is valid and equals 2, and so on. This is important for backwards compatibility: because comparisons and so on currently return integer values, there's no way of telling what uses existing applications make of these values. It is expected that over time, the standard library will be updated to use False and True when appropriate (but not to require a bool argument type where previously an int was allowed). This change should not pose additional problems and is not specified in detail by this PEP.

C API

The header file "boolobject.h" defines the C API for the bool type. It is included by "Python.h" so there is no need to include it directly. The existing names Py_False and Py_True reference the unique bool objects False and True (previously these referenced static int objects with values 0 and 1, which were not unique amongst int values). A new API, PyObject *PyBool_FromLong(long), takes a C long int argument and returns a new reference to either Py_False (when the argument is zero) or Py_True (when it is nonzero). To check whether an object is a bool, the macro PyBool_Check() can be used.
The type of bool instances is PyBoolObject *. The bool type object is available as PyBool_Type.

Clarification

This PEP does not change the fact that almost all object types can be used as truth values. For example, when used in an if statement, an empty list is false and a non-empty one is true; this does not change and there is no plan to ever change this. The only thing that changes is the preferred values to represent truth values when returned or assigned explicitly. Previously, these preferred truth values were 0 and 1; the PEP changes the preferred values to False and True, and changes built-in operations to return these preferred values.

Compatibility

Because of backwards compatibility, the bool type lacks many properties that some would like to see. For example, arithmetic operations with one or two bool arguments are allowed, treating False as 0 and True as 1. Also, a bool may be used as a sequence index. I don't see this as a problem, and I don't want to evolve the language in this direction either. I don't believe that a stricter interpretation of "Booleanness" makes the language any clearer. Another consequence of the compatibility requirement is that the expression "True and 6" has the value 6, and similarly the expression "False or None" has the value None. The "and" and "or" operators are usefully defined to return the first argument that determines the outcome, and this won't change; in particular, they don't force the outcome to be a bool. Of course, if both arguments are bools, the outcome is always a bool. It can also easily be coerced into being a bool by writing for example "bool(x and y)".

Resolved Issues

(See also the Review section above.)

- Because the repr() or str() of a bool value is different from an int value, some code (for example doctest-based unit tests, and possibly database code that relies on things like "%s" % truth) may fail. It is easy to work around this (without explicitly referencing the bool type), and it is expected that this only affects a very small amount of code that can easily be fixed.
- Other languages (C99, C++, Java) name the constants "false" and "true", in all lowercase. For Python, I prefer to stick with the example set by the existing built-in constants, which all use CapitalizedWords: None, Ellipsis, NotImplemented (as well as all built-in exceptions). Python's built-in namespace uses all lowercase for functions and types only.
- It has been suggested that, in order to satisfy user expectations, for every x that is considered true in a Boolean context, the expression x == True should be true, and likewise if x is considered false, x == False should be true. In particular newbies who have only just learned about Boolean variables are likely to write:

      if x == True: ...

  instead of the correct form:

      if x: ...

  There seem to be strong psychological and linguistic reasons why many people are at first uncomfortable with the latter form, but I believe that the solution should be in education rather than in crippling the language. After all, == is generally seen as a transitive operator, meaning that from a==b and b==c we can deduce a==c. But if any comparison to True were to report equality when the other operand was a true value of any type, atrocities like 6==True==7 would hold true, from which one could infer the falsehood 6==7. That's unacceptable. (In addition, it would break backwards compatibility. But even if it didn't, I'd still be against this, for the stated reasons.)
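The distinction between truth in a Boolean context and equality with True/False is easy to verify in any modern Python; this little demonstration is mine, not part of the PEP:

    assert [1, 2]                     # a non-empty list is true...
    assert ([1, 2] == True) is False  # ...but it does not *equal* True

    assert not []                     # an empty list is false...
    assert ([] == False) is False     # ...but it does not *equal* False

    # bool stays an int subclass, so the compatibility rules hold:
    assert True == 1 and False == 0
    assert True + 1 == 2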
Newbies should also be reminded that there's never a reason to write:

    if bool(x): ...

since the bool is implicit in the "if". Explicit is not better than implicit here, since the added verbiage impairs readability and there's no other interpretation possible. There is, however, sometimes a reason to write:

    b = bool(x)

This is useful when it is unattractive to keep a reference to an arbitrary object x, or when normalization is required for some other reason. It is also sometimes appropriate to write:

    i = int(bool(x))

which converts the bool to an int with the value 0 or 1. This conveys the intention to henceforth use the value as an int.

Implementation

A complete implementation in C has been uploaded to the SourceForge patch manager: https://bugs.python.org/issue528022 This will soon be checked into CVS for python 2.3a0.

Copyright

This document has been placed in the public domain.
PEP: 373
Title: Python 2.7 Release Schedule
Version: $Revision$
Last-Modified: $Date$
Author: Benjamin Peterson <[email protected]>
Status: Final
Type: Informational
Topic: Release
Content-Type: text/x-rst
Created: 03-Nov-2008
Python-Version: 2.7

Abstract

This document describes the development and release schedule for Python 2.7. Python 2.7 is the end of the Python 2.x series, and is succeeded by Python 3. See the "Sunsetting Python 2" FAQ on python.org for a general overview.

Update (April 2014)

The End Of Life date (EOL, sunset date) for Python 2.7 has been moved five years into the future, to 2020. This decision was made to clarify the status of Python 2.7 and relieve worries for those users who cannot yet migrate to Python 3. See also PEP 466.

This declaration does not guarantee that bugfix releases will be made on a regular basis, but it should enable volunteers who want to contribute bugfixes for Python 2.7 and it should satisfy vendors who still have to support Python 2 for years to come.

There will be no Python 2.8 (see PEP 404).

Release Manager and Crew

Position               Name
---------------------  -------------------
2.7 Release Manager    Benjamin Peterson
Windows installers     Steve Dower
Mac installers         Ned Deily

Maintenance releases

Being the last of the 2.x series, 2.7 received bugfix support until 2020. Support officially stopped and the 2.7.18 code freeze occurred on January 1, 2020, but the final release occurred after that date.

Dates of previous maintenance releases:

- 2.7.1 2010-11-27
- 2.7.2 2011-07-21
- 2.7.3rc1 2012-02-23
- 2.7.3rc2 2012-03-15
- 2.7.3 2012-04-09
- 2.7.4rc1 2013-03-23
- 2.7.4 2013-04-06
- 2.7.5 2013-05-12
- 2.7.6rc1 2013-10-26
- 2.7.6 2013-11-10
- 2.7.7rc1 2014-05-17
- 2.7.7 2014-05-31
- 2.7.8 2014-06-30
- 2.7.9rc1 2014-11-26
- 2.7.9 2014-12-10
- 2.7.10rc1 2015-05-09
- 2.7.10 2015-05-23
- 2.7.11rc1 2015-11-21
- 2.7.11 2015-12-05
- 2.7.12 2016-06-25
- 2.7.13rc1 2016-12-03
- 2.7.13 2016-12-17
- 2.7.14rc1 2017-08-26
- 2.7.14 2017-09-16
- 2.7.15rc1 2018-04-14
- 2.7.15 2018-05-01
- 2.7.16rc1 2019-02-16
- 2.7.16 2019-03-02
- 2.7.17rc1 2019-10-05
- 2.7.17 2019-10-19
- 2.7.18rc1 2020-04-04
- 2.7.18 2020-04-20

2.7.0 Release Schedule

The release schedule for 2.7.0 was:

- 2.7 alpha 1 2009-12-05
- 2.7 alpha 2 2010-01-09
- 2.7 alpha 3 2010-02-06
- 2.7 alpha 4 2010-03-06
- 2.7 beta 1 2010-04-03
- 2.7 beta 2 2010-05-08
- 2.7 rc1 2010-06-05
- 2.7 rc2 2010-06-19
- 2.7 final 2010-07-03

Possible features for 2.7

Nothing here. [Note that a moratorium on core language changes is in effect.]

References

- "The Python 2 death march" on python-dev
- Petition: abandon plans to ship a 2.7.18 in April
- [RELEASE] Python 2.7.18, the end of an era

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 319
Title: Python Synchronize/Asynchronize Block
Version: $Revision$
Last-Modified: $Date$
Author: Michel Pelletier <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 24-Feb-2003
Python-Version: 2.4
Post-History:

Abstract

This PEP proposes adding two new keywords to Python, 'synchronize' and 'asynchronize'.

Pronouncement

This PEP is rejected in favor of PEP 343.

The 'synchronize' Keyword

The concept of code synchronization in Python is too low-level. To synchronize code a programmer must be aware of the details of the following pseudo-code pattern:

    initialize_lock()

    ...

    acquire_lock()
    try:
        change_shared_data()
    finally:
        release_lock()

This synchronized block pattern is not the only pattern (more discussed below) but it is very common. This PEP proposes replacing the above code with the following equivalent:

    synchronize:
        change_shared_data()

The advantages of this scheme are simpler syntax and less room for user error. Currently users are required to write code about acquiring and releasing thread locks in 'try/finally' blocks; errors in this code can cause notoriously difficult concurrent thread locking issues.

The 'asynchronize' Keyword

While executing a 'synchronize' block of code a programmer may want to "drop back" to running asynchronously momentarily, to run blocking input/output routines or something else that might take an indeterminate amount of time and does not require synchronization. This code usually follows the pattern:

    initialize_lock()

    ...

    acquire_lock()
    try:
        change_shared_data()
        release_lock()       # become async
        do_blocking_io()
        acquire_lock()       # sync again
        change_shared_data2()
    finally:
        release_lock()

The asynchronous section of the code is not very obvious visually, so it is marked up with comments. Using the proposed 'asynchronize' keyword this code becomes much cleaner, easier to understand, and less prone to error:

    synchronize:
        change_shared_data()
        asynchronize:
            do_blocking_io()
        change_shared_data2()

Encountering an 'asynchronize' keyword inside a non-synchronized block can either raise an error or issue a warning (as all code blocks are implicitly asynchronous anyway). It is important to note that the above example is not the same as:

    synchronize:
        change_shared_data()
    do_blocking_io()
    synchronize:
        change_shared_data2()

because both synchronized blocks of code may be running inside the same iteration of a loop. Consider:

    while in_main_loop():
        synchronize:
            change_shared_data()
            asynchronize:
                do_blocking_io()
            change_shared_data2()

Many threads may be looping through this code. Without the 'asynchronize' keyword one thread cannot stay in the loop and release the lock at the same time while blocking IO is going on. This pattern of releasing locks inside a main loop to do blocking IO is used extensively inside the CPython interpreter itself.

Synchronization Targets

As proposed, the 'synchronize' and 'asynchronize' keywords synchronize a block of code. However, programmers may want to specify a target object that threads synchronize on. Any object can be a synchronization target.
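For comparison with the language as it exists after this PEP's rejection: PEP 343's with statement now expresses the same intent. Below is a minimal runnable sketch using the standard threading module; the names shared, worker, and do_blocking_io are illustrative only, not taken from this PEP:

    import threading
    import time

    lock = threading.Lock()
    shared = []

    def do_blocking_io():
        time.sleep(0.01)          # stand-in for blocking I/O

    def worker(item):
        with lock:                # plays the role of the proposed 'synchronize'
            shared.append(item)
        do_blocking_io()          # runs with the lock released, as 'asynchronize' intended

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sorted(shared))         # [0, 1, 2, 3]

The lock is held only inside the with block, so the blocking call runs unlocked, which is exactly the behaviour the 'asynchronize' keyword was meant to make explicit.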
Consider a two-way queue object: two different objects are used by the same 'synchronize' code block to synchronize both queues separately in the 'get' method:

    class TwoWayQueue:
        def __init__(self):
            self.front = []
            self.rear = []

        def putFront(self, item):
            self.put(item, self.front)

        def getFront(self):
            item = self.get(self.front)
            return item

        def putRear(self, item):
            self.put(item, self.rear)

        def getRear(self):
            item = self.get(self.rear)
            return item

        def put(self, item, queue):
            synchronize queue:
                queue.append(item)

        def get(self, queue):
            synchronize queue:
                item = queue[0]
                del queue[0]
                return item

Here is the equivalent code in Python as it is now, without a 'synchronize' keyword:

    import thread

    class LockableQueue:
        def __init__(self):
            self.queue = []
            self.lock = thread.allocate_lock()

    class TwoWayQueue:
        def __init__(self):
            self.front = LockableQueue()
            self.rear = LockableQueue()

        def putFront(self, item):
            self.put(item, self.front)

        def getFront(self):
            item = self.get(self.front)
            return item

        def putRear(self, item):
            self.put(item, self.rear)

        def getRear(self):
            item = self.get(self.rear)
            return item

        def put(self, item, queue):
            queue.lock.acquire()
            try:
                queue.queue.append(item)    # append to the wrapped list
            finally:
                queue.lock.release()

        def get(self, queue):
            queue.lock.acquire()
            try:
                item = queue.queue[0]       # read from the wrapped list
                del queue.queue[0]
                return item
            finally:
                queue.lock.release()

The last example had to define an extra class to associate a lock with the queue, where in the first example the 'synchronize' keyword does this association internally and transparently.

Other Patterns that Synchronize

There are some situations where the 'synchronize' and 'asynchronize' keywords cannot entirely replace the use of lock methods like acquire and release. Some examples are if the programmer wants to provide arguments for acquire, or if a lock is acquired in one code block but released in another, as shown below.

Here is a class from Zope modified to use both the 'synchronize' and 'asynchronize' keywords; it also uses a pool of explicit locks that are acquired and released in different code blocks and thus don't use 'synchronize':

    import thread
    from ZServerPublisher import ZServerPublisher

    class ZRendevous:
        def __init__(self, n=1):
            pool=[]
            self._lists=pool, [], []
            synchronize:
                while n > 0:
                    l=thread.allocate_lock()
                    l.acquire()
                    pool.append(l)
                    thread.start_new_thread(ZServerPublisher, (self.accept,))
                    n=n-1

        def accept(self):
            synchronize:
                pool, requests, ready = self._lists
                while not requests:
                    l=pool[-1]
                    del pool[-1]
                    ready.append(l)
                    asynchronize:
                        l.acquire()
                    pool.append(l)
                r=requests[0]
                del requests[0]
                return r

        def handle(self, name, request, response):
            synchronize:
                pool, requests, ready = self._lists
                requests.append((name, request, response))
                if ready:
                    l=ready[-1]
                    del ready[-1]
                    l.release()

Here is the original class as found in the 'Zope/ZServer/PubCore/ZRendevous.py' module.
The "convenience" of the '_a' and '_r' shortcut names obscures the code:

    import thread
    from ZServerPublisher import ZServerPublisher

    class ZRendevous:
        def __init__(self, n=1):
            sync=thread.allocate_lock()
            self._a=sync.acquire
            self._r=sync.release
            pool=[]
            self._lists=pool, [], []
            self._a()
            try:
                while n > 0:
                    l=thread.allocate_lock()
                    l.acquire()
                    pool.append(l)
                    thread.start_new_thread(ZServerPublisher, (self.accept,))
                    n=n-1
            finally:
                self._r()

        def accept(self):
            self._a()
            try:
                pool, requests, ready = self._lists
                while not requests:
                    l=pool[-1]
                    del pool[-1]
                    ready.append(l)
                    self._r()
                    l.acquire()
                    self._a()
                    pool.append(l)
                r=requests[0]
                del requests[0]
                return r
            finally:
                self._r()

        def handle(self, name, request, response):
            self._a()
            try:
                pool, requests, ready = self._lists
                requests.append((name, request, response))
                if ready:
                    l=ready[-1]
                    del ready[-1]
                    l.release()
            finally:
                self._r()

In particular the asynchronize section of the accept method is not very obvious. To beginner programmers, 'synchronize' and 'asynchronize' remove many of the problems encountered when juggling multiple acquire and release methods on different locks in different try/finally blocks.

Formal Syntax

Python syntax is defined in a modified BNF grammar notation described in the Python Language Reference[1]. This section describes the proposed synchronization syntax using this grammar:

    synchronize_stmt:  'synchronize' [test] ':' suite
    asynchronize_stmt: 'asynchronize' [test] ':' suite
    compound_stmt:     ... | synchronize_stmt | asynchronize_stmt

(The '...' indicates other compound statements elided.)

Proposed Implementation

The author of this PEP has not explored an implementation yet. There are several implementation issues that must be resolved. The main implementation issue is what exactly gets locked and unlocked during a synchronized block.

During an unqualified synchronized block (the use of the 'synchronize' keyword without a target argument) a lock could be created and associated with the synchronized code block object. Any threads that are to execute the block must first acquire the code block lock. When an 'asynchronize' keyword is encountered in a 'synchronize' block, the code block lock is unlocked before the inner block is executed and re-locked when the inner block terminates.

When a synchronized block target is specified, the object is associated with a lock. How this is implemented cleanly is probably the highest risk of this proposal. Java Virtual Machines typically associate a special hidden lock object with the target object and use it to synchronize the block around the target only.

Backward Compatibility

Backward compatibility is solved with the new from __future__ Python syntax (PEP 236), and the new warning framework (PEP 230) to evolve the Python language into phasing out any conflicting names that use the new keywords 'synchronize' and 'asynchronize'. To use the syntax now, a developer could use the statement:

    from __future__ import threadsync  # or whatever

In addition, any code that uses the keyword 'synchronize' or 'asynchronize' as an identifier will be issued a warning from Python. After the appropriate period of time, the syntax would become standard, the above import statement would do nothing, and any identifiers named 'synchronize' or 'asynchronize' would raise an exception.

PEP 310 Reliable Acquisition/Release Pairs

PEP 310 proposes the 'with' keyword that can serve the same function as 'synchronize' (but no facility for 'asynchronize').
The pattern:

    initialize_lock()

    with the_lock:
        change_shared_data()

is equivalent to the proposed:

    synchronize the_lock:
        change_shared_data()

PEP 310 must synchronize on an existing lock, while this PEP proposes that unqualified 'synchronize' statements synchronize on a global, internal, transparent lock in addition to qualified 'synchronize' statements. The 'with' statement also requires lock initialization, while the 'synchronize' statement can synchronize on any target object, including locks.

While limited in this fashion, the 'with' statement is more abstract and serves more purposes than synchronization. For example, transactions could be used with the 'with' keyword:

    initialize_transaction()

    with my_transaction:
        do_in_transaction()

    # when the block terminates, the transaction is committed.

The 'synchronize' and 'asynchronize' keywords cannot serve this or any other general acquire/release pattern other than thread synchronization.

How Java Does It

Java defines a 'synchronized' keyword (note the grammatical tense difference between the Java keyword and this PEP's 'synchronize') which must be qualified on any object. The syntax is:

    synchronized (Expression) Block

Expression must yield a valid object (null raises an error, and exceptions during 'Expression' terminate the 'synchronized' block for the same reason) upon which 'Block' is synchronized.

How Jython Does It

Jython uses a 'synchronize' class with the static method 'make_synchronized', which accepts one callable argument and returns a newly created, synchronized, callable "wrapper" around the argument.

Summary of Proposed Changes to Python

Adding new 'synchronize' and 'asynchronize' keywords to the language.

Risks

This PEP proposes adding two keywords to the Python language. This may break code. There is no implementation to test. It's not the most important problem facing Python programmers today (although it is a fairly notorious one). The equivalent Java keyword is the past participle 'synchronized'. This PEP proposes the present tense, 'synchronize', as being more in the spirit of Python (there being less distinction between compile-time and run-time in Python than in Java).

Dissenting Opinion

This PEP has not been discussed on python-dev.

References

[1] The Python Language Reference http://docs.python.org/reference/

Copyright

This document has been placed in the public domain.
PEP: 693
Title: Python 3.12 Release Schedule
Author: Thomas Wouters <[email protected]>
Status: Active
Type: Informational
Topic: Release
Created: 24-May-2022
Python-Version: 3.12

Abstract

This document describes the development and release schedule for Python 3.12.

Release Manager and Crew

- 3.12 Release Manager: Thomas Wouters
- Windows installers: Steve Dower
- Mac installers: Ned Deily
- Documentation: Julien Palard

Release Schedule

3.12.0 schedule

Note: the dates below use a 17-month development period that results in a 12-month release cadence between feature versions, as defined by PEP 602.

Actual:

- 3.12 development begins: Sunday, 2022-05-08
- 3.12.0 alpha 1: Monday, 2022-10-24
- 3.12.0 alpha 2: Monday, 2022-11-14
- 3.12.0 alpha 3: Tuesday, 2022-12-06
- 3.12.0 alpha 4: Tuesday, 2023-01-10
- 3.12.0 alpha 5: Tuesday, 2023-02-07
- 3.12.0 alpha 6: Tuesday, 2023-03-07
- 3.12.0 alpha 7: Tuesday, 2023-04-04
- 3.12.0 beta 1: Monday, 2023-05-22 (No new features beyond this point.)
- 3.12.0 beta 2: Tuesday, 2023-06-06
- 3.12.0 beta 3: Monday, 2023-06-19
- 3.12.0 beta 4: Tuesday, 2023-07-11
- 3.12.0 candidate 1: Sunday, 2023-08-06
- 3.12.0 candidate 2: Wednesday, 2023-09-06
- 3.12.0 candidate 3: Tuesday, 2023-09-19
- 3.12.0 final: Monday, 2023-10-02

Bugfix releases

Actual:

- 3.12.1: Thursday, 2023-12-07
- 3.12.2: Tuesday, 2024-02-06
- 3.12.3: Tuesday, 2024-04-09
- 3.12.4: Thursday, 2024-06-06
- 3.12.5: Tuesday, 2024-08-06
- 3.12.6: Friday, 2024-09-06
- 3.12.7: Tuesday, 2024-10-01

Expected:

- 3.12.8: Tuesday, 2024-12-03
- 3.12.9: Tuesday, 2025-02-04
- 3.12.10: Tuesday, 2025-04-08

Source-only security fix releases

Provided irregularly on an as-needed basis until October 2028.

3.12 Lifespan

3.12 will receive bugfix updates approximately every 2 months for approximately 18 months. Some time after the release of 3.13.0 final, the ninth and final 3.12 bugfix update will be released. After that, it is expected that security updates (source only) will be released until 5 years after the release of 3.12.0 final, so until approximately October 2028.

Features for 3.12

New features can be found in What’s New In Python 3.12.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 348
Title: Exception Reorganization for Python 3.0
Version: $Revision$
Last-Modified: $Date$
Author: Brett Cannon <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Jul-2005
Post-History:

Note

This PEP has been rejected[1].

Abstract

Python, as of version 2.4, has 38 exceptions (including warnings) in the built-in namespace in a rather shallow hierarchy. These classes have come about over the years without a chance to learn from experience. This PEP proposes a reorganization of the hierarchy for Python 3.0, when backwards-compatibility is not as much of an issue.

Along with this reorganization, adding a requirement that all objects passed to a raise statement must inherit from a specific superclass is proposed. This is to have guarantees about the basic interface of exceptions and to further enhance the natural hierarchy of exceptions.

Lastly, bare except clauses will be changed to be semantically equivalent to except Exception. Most people currently use bare except clauses for this purpose, and with the exception hierarchy reorganization this becomes a viable default.

Rationale For Wanting Change

Exceptions are a critical part of Python. While exceptions are traditionally used to signal errors in a program, they have also grown to be used for flow control for things such as iterators. While their importance is great, there is a lack of structure to them. This stems from the fact that any object can be raised as an exception. Because of this you have no guarantee in terms of what kind of object will be raised, destroying any possible hierarchy raised objects might adhere to.

But exceptions do have a hierarchy, showing the severity of the exception. The hierarchy also groups related exceptions together to simplify catching them in except clauses. To allow people to be able to rely on this hierarchy, a common superclass that all raised objects must inherit from is being proposed. It also allows guarantees about the interface to raised objects to be made (see PEP 344). A discussion about all of this has occurred before on python-dev[2].

As bare except clauses stand now, they catch all exceptions. While this can be handy, it is rather overreaching for the common case. Thanks to having a required superclass, catching all exceptions is as easy as catching just one specific exception. This allows bare except clauses to be used for a more useful purpose. Once again, this has been discussed on python-dev[3].

Finally, slight changes to the exception hierarchy will make it much more reasonable in terms of structure. With some minor rearranging, exceptions that should not typically be caught can be allowed to propagate to the top of the execution stack, terminating the interpreter as intended.

Philosophy of Reorganization

For the reorganization of the hierarchy, a general philosophy was followed that developed from discussion of earlier drafts of this PEP[4], [5], [6], [7], [8], [9]. First and foremost was to not break anything that works. This meant that renaming exceptions was out of the question unless the name was deemed severely bad. This also meant no removal of exceptions unless they were viewed as truly misplaced. The introduction of new exceptions was only done in situations where there might be a use for catching a superclass of a category of exceptions. Lastly, existing exceptions would have their inheritance tree changed only if it was felt they were truly misplaced to begin with.

For all new exceptions, the proper suffix had to be chosen.
For those that signal an error, "Error" is to be used. If the exception is a warning, then "Warning". "Exception" is to be used when none of the other suffixes are proper to use and no specific suffix is a better fit.

After that it came down to choosing which exceptions should and should not inherit from Exception. This was for the purpose of making bare except clauses more useful.

Lastly, the entire existing hierarchy had to inherit from the new exception meant to act as the required superclass for all exceptions to inherit from.

New Hierarchy

Note: Exceptions flagged with "stricter inheritance" will no longer inherit from a certain class. A "broader inheritance" flag means a class has been added to the exception's inheritance tree. All comparisons are against the Python 2.4 exception hierarchy.

+-- BaseException (new; broader inheritance for subclasses) +-- Exception +-- GeneratorExit (defined in PEP 342) +-- StandardError +-- ArithmeticError +-- DivideByZeroError +-- FloatingPointError +-- OverflowError +-- AssertionError +-- AttributeError +-- EnvironmentError +-- IOError +-- EOFError +-- OSError +-- ImportError +-- LookupError +-- IndexError +-- KeyError +-- MemoryError +-- NameError +-- UnboundLocalError +-- NotImplementedError (stricter inheritance) +-- SyntaxError +-- IndentationError +-- TabError +-- TypeError +-- RuntimeError +-- UnicodeError +-- UnicodeDecodeError +-- UnicodeEncodeError +-- UnicodeTranslateError +-- ValueError +-- ReferenceError +-- StopIteration +-- SystemError +-- Warning +-- DeprecationWarning +-- FutureWarning +-- PendingDeprecationWarning +-- RuntimeWarning +-- SyntaxWarning +-- UserWarning +-- WindowsError +-- KeyboardInterrupt (stricter inheritance) +-- SystemExit (stricter inheritance)

Differences Compared to Python 2.4

A more thorough explanation of terms is needed when discussing inheritance changes. Inheritance changes result in either broader or more restrictive inheritance. "Broader" is when a class has an inheritance tree like cls, A and then becomes cls, B, A. "Stricter" is the reverse.

BaseException

The superclass that all exceptions must inherit from. Its name was chosen to reflect that it is at the base of the exception hierarchy while being an exception itself. "Raisable" was considered as a name; it was passed on because its name did not properly reflect the fact that it is an exception itself.

Direct inheritance of BaseException is not expected, and will be discouraged for the general case. Most user-defined exceptions should inherit from Exception instead. This allows catching Exception to continue to work in the common case of catching all exceptions that should be caught. Direct inheritance of BaseException should only be done in cases where an entirely new category of exception is desired. But, for cases where all exceptions should be caught blindly, except BaseException will work.

KeyboardInterrupt and SystemExit

Both exceptions are no longer under Exception. This is to allow bare except clauses to act as a more viable default case by catching exceptions that inherit from Exception. With both KeyboardInterrupt and SystemExit acting as signals that the interpreter is expected to exit, catching them in the common case is the wrong semantics.

NotImplementedError

Inherits from Exception instead of from RuntimeError. Originally inheriting from RuntimeError, NotImplementedError does not have any direct relation to the exception meant for use in user code as a quick-and-dirty exception. Thus it now directly inherits from Exception.
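Although this PEP as a whole was rejected, the BaseException split described above did reach Python via PEP 352, so the catching semantics can be demonstrated in any modern Python. A short runnable sketch (risky_operation is an illustrative name, not from the PEP):

    import sys

    def risky_operation():
        raise ValueError("ordinary failure")

    try:
        risky_operation()
    except Exception as exc:            # catches "ordinary" errors...
        print("handled:", exc)

    try:
        sys.exit(1)
    except Exception:
        print("never reached")          # ...but not interpreter-exit signals
    except BaseException as exc:        # explicit catch-all, per this PEP's guidance
        print("caught:", type(exc).__name__)   # caught: SystemExit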
Required Superclass for raise

By requiring all objects passed to a raise statement to inherit from a specific superclass, all exceptions are guaranteed to have certain attributes. If PEP 344 is accepted, the attributes outlined there will be guaranteed to be on all exceptions raised. This should help facilitate debugging by making the querying of information from exceptions much easier.

The proposed hierarchy has BaseException as the required base class.

Implementation

Enforcement is straightforward. Modifying RAISE_VARARGS to do an inheritance check first before raising an exception should be enough. For the C API, all functions that set an exception will have the same inheritance check applied.

Bare except Clauses Catch Exception

In most existing Python 2.4 code, bare except clauses are too broad in the exceptions they catch. Typically only exceptions that signal an error are desired to be caught. This means that exceptions that are used to signify that the interpreter should exit should not be caught in the common case.

With KeyboardInterrupt and SystemExit moved to inherit from BaseException instead of Exception, changing bare except clauses to act as except Exception becomes a much more reasonable default. This change also will break very little code since these semantics are what most people want for bare except clauses.

The complete removal of bare except clauses has been argued for. The case has been made that they violate both Only One Way To Do It (OOWTDI) and Explicit Is Better Than Implicit (EIBTI) as listed in the Zen of Python (PEP 20). But Practicality Beats Purity (PBP), also in the Zen of Python, trumps both of these in this case. The BDFL has stated that bare except clauses will work this way[10].

Implementation

The compiler will emit the bytecode for except Exception whenever a bare except clause is reached.

Transition Plan

Because of the complexity and clutter that would be required to add all features planned in this PEP, the transition plan is very simple. In Python 2.5 BaseException is added. In Python 3.0, all remaining features (required superclass, change in inheritance, bare except clauses becoming the same as except Exception) will go into effect. Making all of this work in a backwards-compatible way in Python 2.5 would require very deep hacks in the exception machinery which could be error-prone and lead to a slowdown in performance for little benefit.

To help with the transition, the documentation will be changed to reflect several programming guidelines:

- When one wants to catch all exceptions, catch BaseException
- To catch all exceptions that do not represent the termination of the interpreter, catch Exception explicitly
- Explicitly catch KeyboardInterrupt and SystemExit; don't rely on inheritance from Exception to lead to the capture
- Always catch NotImplementedError explicitly instead of relying on the inheritance from RuntimeError

The documentation for the 'exceptions' module[11], tutorial[12], and PEP 290 will all require updating.

Rejected Ideas

DeprecationWarning Inheriting From PendingDeprecationWarning

This was originally proposed because a DeprecationWarning can be viewed as a PendingDeprecationWarning that is being removed in the next version. But since enough people thought the inheritance could logically work the other way around, the idea was dropped.

AttributeError Inheriting From TypeError or NameError

Viewing attributes as part of the interface of a type caused the idea of inheriting from TypeError.
But that partially defeats the thinking of duck typing and thus the idea was dropped.

Inheriting from NameError was suggested because objects can be viewed as having their own namespace where the attributes live, and when an attribute is not found it is a namespace failure. This was also dropped as a possibility since not everyone shared this view.

Removal of EnvironmentError

Originally proposed based on the idea that EnvironmentError was an unneeded distinction, the BDFL overruled this idea[13].

Introduction of MacError and UnixError

Proposed to add symmetry to WindowsError, the BDFL said they won't be used enough[14]. The idea of then removing WindowsError was proposed and accepted as reasonable, thus completely negating the idea of adding these exceptions.

SystemError Subclassing SystemExit

Proposed because a SystemError is meant to lead to a system exit, the idea was removed since CriticalError indicates this better.

ControlFlowException Under Exception

It has been suggested that ControlFlowException should inherit from Exception. This idea has been rejected based on the thinking that control flow exceptions typically do not all need to be caught by a single except clause.

Rename NameError to NamespaceError

NameError is considered more succinct and leaves open no possible mistyping of the capitalization of "Namespace"[15].

Renaming RuntimeError or Introducing SimpleError

The thinking was that RuntimeError was in no way an obvious name for an exception meant to be used when a situation did not call for the creation of a new exception. The renaming was rejected on the basis that the exception is already used throughout the interpreter[16]. Rejection of SimpleError was founded on the thought that people should be free to use whatever exception they choose and not have one so blatantly suggested[17].

Renaming Existing Exceptions

Various renamings were suggested but none garnered more than a +0 vote (renaming ReferenceError to WeakReferenceError). The thinking was that the existing names were fine and no one had actively complained about them ever. To minimize backwards-compatibility issues and avoid causing existing Python programmers extra pain, the renamings were removed.

Have EOFError Subclass IOError

The original thought was that since EOFError deals directly with I/O, it should subclass IOError. But since EOFError is used more as a signal that an event has occurred (the exhaustion of an I/O port), it should not subclass such a specific error exception.

Have MemoryError and SystemError Have a Common Superclass

Both classes deal with the interpreter, so why not have them have a common superclass? Because one of them means that the interpreter is in a state that it should not recover from while the other does not.

Common Superclass for PendingDeprecationWarning and DeprecationWarning

Grouping the deprecation warning exceptions together makes intuitive sense. But this sensible idea does not extend well when one considers how rarely either warning is used, let alone at the same time.

Removing WindowsError

Originally proposed based on the idea that having such a platform-specific exception should not be in the built-in namespace. It turns out, though, enough code exists that uses the exception to warrant it staying.

Superclass for KeyboardInterrupt and SystemExit

Proposed to make catching non-Exception inheriting exceptions easier, along with easing the transition to the new hierarchy, the idea was rejected by the BDFL[18].
The argument that existing code did not show enough instances of the pair of exceptions being caught and thus did not justify cluttering the built-in namespace was used.

Acknowledgements

Thanks to Robert Brewer, Josiah Carlson, Alyssa Coghlan, Timothy Delaney, Jack Diedrich, Fred L. Drake, Jr., Philip J. Eby, Greg Ewing, James Y. Knight, MA Lemburg, Guido van Rossum, Stephen J. Turnbull, Raymond Hettinger, and everyone else I missed for participating in the discussion.

References

[1] python-dev email (Bare except clauses in PEP 348) https://mail.python.org/pipermail/python-dev/2005-August/055676.html
[2] python-dev Summary (An exception is an exception, unless it doesn't inherit from Exception) http://www.python.org/dev/summary/2004-08-01_2004-08-15.html#an-exception-is-an-exception-unless-it-doesn-t-inherit-from-exception
[3] python-dev email (PEP, take 2: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-August/055116.html
[4] python-dev thread (Pre-PEP: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-July/055020.html, https://mail.python.org/pipermail/python-dev/2005-August/055065.html
[5] python-dev thread (PEP, take 2: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-August/055103.html
[6] python-dev thread (Reorg PEP checked in) https://mail.python.org/pipermail/python-dev/2005-August/055138.html
[7] python-dev thread (Major revision of PEP 348 committed) https://mail.python.org/pipermail/python-dev/2005-August/055199.html
[8] python-dev thread (Exception Reorg PEP revised yet again) https://mail.python.org/pipermail/python-dev/2005-August/055292.html
[9] python-dev thread (PEP 348 (exception reorg) revised again) https://mail.python.org/pipermail/python-dev/2005-August/055412.html
[10] python-dev email (PEP 348 (exception reorg) revised again) https://mail.python.org/pipermail/python-dev/2005-August/055423.html
[11] exceptions module http://docs.python.org/library/exceptions.html
[12] Python Tutorial http://docs.python.org/tutorial/
[13] python-dev email (Pre-PEP: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-July/055019.html
[14] python-dev email (Pre-PEP: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-July/055019.html
[15] python-dev email (PEP, take 2: Exception Reorganization for Python 3.0) https://mail.python.org/pipermail/python-dev/2005-August/055159.html
[16] python-dev email (Exception Reorg PEP checked in) https://mail.python.org/pipermail/python-dev/2005-August/055149.html
[17] python-dev email (Exception Reorg PEP checked in) https://mail.python.org/pipermail/python-dev/2005-August/055175.html
[18] python-dev email (PEP 348 (exception reorg) revised again) https://mail.python.org/pipermail/python-dev/2005-August/055423.html

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 End:
PEP: 3112
Title: Bytes literals in Python 3000
Version: $Revision$
Last-Modified: $Date$
Author: Jason Orendorff <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Requires: 358
Created: 23-Feb-2007
Python-Version: 3.0
Post-History: 23-Feb-2007

Abstract

This PEP proposes a literal syntax for the bytes objects introduced in PEP 358. The purpose is to provide a convenient way to spell ASCII strings and arbitrary binary data.

Motivation

Existing spellings of an ASCII string in Python 3000 include:

    bytes('Hello world', 'ascii')
    'Hello world'.encode('ascii')

The proposed syntax is:

    b'Hello world'

Existing spellings of an 8-bit binary sequence in Python 3000 include:

    bytes([0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00])
    bytes('\x7fELF\x01\x01\x01\0', 'latin-1')
    '7f454c4601010100'.decode('hex')

The proposed syntax is:

    b'\x7f\x45\x4c\x46\x01\x01\x01\x00'
    b'\x7fELF\x01\x01\x01\0'

In both cases, the advantages of the new syntax are brevity, some small efficiency gain, and the detection of encoding errors at compile time rather than at runtime. The brevity benefit is especially felt when using the string-like methods of bytes objects:

    lines = bdata.split(bytes('\n', 'ascii'))  # existing syntax
    lines = bdata.split(b'\n')                 # proposed syntax

And when converting code from Python 2.x to Python 3000:

    sok.send('EXIT\r\n')                  # Python 2.x
    sok.send('EXIT\r\n'.encode('ascii'))  # Python 3000 existing
    sok.send(b'EXIT\r\n')                 # proposed

Grammar Changes

The proposed syntax is an extension of the existing string syntax[1]. The new syntax for strings, including the new bytes literal, is:

    stringliteral:   [stringprefix] (shortstring | longstring)
    stringprefix:    "b" | "r" | "br" | "B" | "R" | "BR" | "Br" | "bR"
    shortstring:     "'" shortstringitem* "'" | '"' shortstringitem* '"'
    longstring:      "'''" longstringitem* "'''" | '"""' longstringitem* '"""'
    shortstringitem: shortstringchar | escapeseq
    longstringitem:  longstringchar | escapeseq
    shortstringchar: <any source character except "\" or newline or the quote>
    longstringchar:  <any source character except "\">
    escapeseq:       "\" NL | "\\" | "\'" | '\"' | "\a" | "\b" | "\f"
                     | "\n" | "\r" | "\t" | "\v" | "\ooo" | "\xhh"
                     | "\uxxxx" | "\Uxxxxxxxx" | "\N{name}"

The following additional restrictions apply only to bytes literals (stringliteral tokens with b or B in the stringprefix):

- Each shortstringchar or longstringchar must be a character between 1 and 127 inclusive, regardless of any encoding declaration[2] in the source file.
- The Unicode-specific escape sequences \uxxxx, \Uxxxxxxxx, and \N{name} are unrecognized in Python 2.x and forbidden in Python 3000.

Adjacent bytes literals are subject to the same concatenation rules as adjacent string literals[3]. A bytes literal adjacent to a string literal is an error.

Semantics

Each evaluation of a bytes literal produces a new bytes object. The bytes in the new object are the bytes represented by the shortstringitem or longstringitem parts of the literal, in the same order.

Rationale

The proposed syntax provides a cleaner migration path from Python 2.x to Python 3000 for most code involving 8-bit strings. Preserving the old 8-bit meaning of a string literal is usually as simple as adding a b prefix. The one exception is Python 2.x strings containing bytes >127, which must be rewritten using escape sequences. Transcoding a source file from one encoding to another, and fixing up the encoding declaration, should preserve the meaning of the program.
Python 2.x non-Unicode strings violate this principle; Python 3000 bytes literals shouldn't.

A string literal with a b in the prefix is always a syntax error in Python 2.5, so this syntax can be introduced in Python 2.6, along with the bytes type.

A bytes literal produces a new object each time it is evaluated, like list displays and unlike string literals. This is necessary because bytes literals, like lists and unlike strings, are mutable[4].

Reference Implementation

Thomas Wouters has checked an implementation into the Py3K branch, r53872.

References

[1] http://docs.python.org/reference/lexical_analysis.html#string-literals
[2] http://docs.python.org/reference/lexical_analysis.html#encoding-declarations
[3] http://docs.python.org/reference/lexical_analysis.html#string-literal-concatenation
[4] https://mail.python.org/pipermail/python-3000/2007-February/005779.html

Copyright

This document has been placed in the public domain.
PEP: 415
Title: Implement context suppression with exception attributes
Version: $Revision$
Last-Modified: $Date$
Author: Benjamin Peterson <[email protected]>
BDFL-Delegate: Alyssa Coghlan
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 26-Feb-2012
Python-Version: 3.3
Post-History: 26-Feb-2012
Replaces: 409
Resolution: https://mail.python.org/pipermail/python-dev/2012-May/119467.html

Abstract

PEP 409 introduced support for the raise exc from None construct to allow the display of the exception context to be explicitly suppressed. This PEP retains the language level changes already implemented in PEP 409, but replaces the underlying implementation mechanism with a simpler approach based on a new __suppress_context__ attribute on all BaseException instances.

PEP Acceptance

This PEP was accepted by Alyssa Coghlan on the 14th of May, 2012.

Rationale

PEP 409 changes __cause__ to be Ellipsis by default. Then if __cause__ is set to None by raise exc from None, no context or cause will be printed should the exception be uncaught.

The main problem with this scheme is that it complicates the role of __cause__: __cause__ should indicate the cause of the exception, not whether __context__ should be printed or not. This use of __cause__ is also not easily extended in the future. For example, we may someday want to allow the programmer to select which of __context__ and __cause__ will be printed. The PEP 409 implementation is not amenable to this.

The use of Ellipsis is a hack. Before PEP 409, Ellipsis was used exclusively in extended slicing. Extended slicing has nothing to do with exceptions, so it's not clear to someone inspecting an exception object why __cause__ should be set to Ellipsis. Using Ellipsis by default for __cause__ makes it asymmetrical with __context__.

Proposal

A new attribute on BaseException, __suppress_context__, will be introduced. Whenever __cause__ is set, __suppress_context__ will be set to True. In particular, raise exc from cause syntax will set exc.__suppress_context__ to True. Exception printing code will check for that attribute to determine whether context and cause will be printed. __cause__ will return to its original purpose and values.

There is precedent for __suppress_context__ with the print_line_and_file exception attribute.

To summarize, raise exc from cause will be equivalent to:

    exc.__cause__ = cause
    raise exc

where exc.__cause__ = cause implicitly sets exc.__suppress_context__.

Patches

There is a patch on Issue 14133.

References

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 254
Title: Making Classes Look More Like Types
Author: Guido van Rossum <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Jun-2001
Python-Version: 2.2
Post-History:

Abstract

This PEP has not been written yet. Watch this space!

Status

This PEP was a stub entry and eventually abandoned without having been filled out. Most of the intended functionality was substantially implemented in Py2.2 with new-style types and classes.

Copyright

This document has been placed in the public domain.
PEP: 607
Title: Reducing CPython's Feature Delivery Latency
Version: $Revision$
Last-Modified: $Date$
Author: Łukasz Langa <[email protected]>, Steve Dower <[email protected]>, Alyssa Coghlan <[email protected]>
Discussions-To: https://discuss.python.org/t/pep-607-shared-background-for-the-release-cadence-peps/2528
Status: Final
Type: Informational
Content-Type: text/x-rst
Created: 11-Oct-2019
Python-Version: 3.9
Post-History: 20-Oct-2019

Abstract

PEP 602 and PEP 605 describe two alternative approaches to delivering smaller collections of features to Python's users more frequently (as compared to the current approach of offering new feature releases every 18-24 months, with the first binary alpha release taking place 6-8 months before the final release).

Both PEPs also propose moving to a release cadence that results in full releases occurring at a consistent time of year (every year for PEP 602, every other year for PEP 605).

This PEP (from the authors of both competing proposals) provides common background on why a change in the release cadence is considered desirable, as well as the perceived risks that both PEPs attempt to mitigate.

Rationale for change

Reducing the size of feature delivery batches

When multiple large changes are delivered together, a complex investigation may be required to determine the root cause of any new issues that arise. Large batch sizes also make it more likely that problems will be encountered, given that they include larger pieces of relatively untested code. The easiest way to simplify those investigations and reduce the likelihood of users encountering problems is to reduce the size of the batches being shipped.

PEP 602 proposes to address this problem via the straightforward approach of reducing CPython's typical batch size by 50%, shipping 12 months of changes each time, rather than accumulating 18+ months of changes.

PEP 605 proposes to address it by regularly delivering 2 months worth of changes to a subset of Python's user base that opts in to running a rolling stream of beta releases (similar to running Windows Insider builds instead of the Windows retail release, or running Debian testing instead of Debian stable).

Reducing the latency of feature delivery

When only stable releases are seeing significant user adoption, and there's a long period of time between stable releases, it creates an incredibly strong temptation for developers to push changes into stable releases before they're really ready for general use.

PEP 602 proposes to address this problem by reducing the period of time between stable releases to 12 months rather than 18 months.

PEP 605 proposes to address it by actively creating a community of Python users that regularly install and use CPython beta releases, providing an incentive for core developers to start shipping changes earlier in the pre-release cycle, in order to obtain feedback before the feature gets locked down in a stable release.

Aligning the release cadence with the calendar year

While the current release cadence is nominally 18-24 months, in practice it has consistently been towards the 18 month end of that range. This means that the target dates for pre-releases and final releases move around from release to release, and the only way to remember them is to either look at the release PEP, or else to add those dates to your calendar.
This is annoying for both individual volunteers and corporate contributors, and also complicates alignment with events like PyCon US (typically April/May) and the now-annual core development sprints (typically in September).

PEP 602 proposes to address this problem by publishing a new release in October every year, and basing the pre-release calendar for each year off that.

PEP 605 proposes to address this problem by alternating between release years (where a new stable release is published in August), and non-release years (where only maintenance releases and new rolling beta releases are published).

Improving the pre-release design feedback cycle

One of the challenges of designing changes to the core interpreter and standard library APIs is that the user base in a position to provide feedback on nightly builds and the current pre-releases is relatively limited. This means that much user feedback isn't received until after an API design has already shipped in a full X.Y.0 release.

If the API is a regular API, then deprecation cycles mean that it may take literally years to correct any design mistakes identified at that point. Marking APIs as provisional nominally offers a way to avoid that constraint, but actually taking advantage of that freedom causes other problems.

PEP 602 proposes to address this problem by starting the alpha period immediately after the previous stable release.

PEP 605 proposes to address this problem by actively promoting adoption of CPython pre-releases for running production workloads (not just for library and application compatibility testing), and adjusting the pre-release management process as necessary to make that a reasonable thing to do.

(Note: some standard library APIs are amenable to initially being shipped as part of separately versioned packages via PyPI, and only later incorporated into the standard library. This section is more about the lower level APIs and non-library features where that approach to obtaining early design feedback doesn't apply.)

Risks to be mitigated

While the status quo could stand to be improved in some respects, Python's popularity indicates that a lot of users and other participants in the wider Python ecosystem are happy enough with the current release management process. Python's user base is too large and too varied to cover all the potential downsides of changing our release cadence here, so instead this section just covers some of the points that have been specifically taken into account in the design of the PEPs.

Impact on users and redistributors that already skip some releases

It is already the case that not all users and redistributors update to every published CPython release series (for example, Debian stable and Ubuntu LTS sometimes skip releases due to the mismatch between their 24-month release cycles and CPython's typically 18-month cycle).

The faster 12-month full release cadence in PEP 602 means that users in this category may end up skipping two releases where they would previously have only skipped one. However, the extended notice period for deprecations means that skipping a single release should no longer result in missed deprecation warnings.

The slower 24-month full release cadence in PEP 605 may move some of the users that have historically been in this category into the "update to every stable release" category.
Impact on users and redistributors that update to every release

Many of Python's users never install a pre-release, but do update to every stable release series at some point after it is published.

PEP 602 aims to mitigate the potential negative impact on members of this group by keeping the minimum gap between releases to 12 months, and retaining the 18 month full support period for each release. Keeping the 18-month full support period for each release branch means that the branches will spend roughly the same amount of time in full support and security-fix-only mode as they do now (~18 months and ~42 months, respectively).

PEP 605 aims to mitigate the potential negative impact on members of this group by increasing use during the pre-release period to achieve more stable final releases with wider ecosystem support at launch. With a 24-month release cadence, each release branch will spend proportionally more time in full support mode and less time in security-fix-only mode (~24 months and ~36 months, respectively).

Full discussion of the impact on this group is left to the individual PEPs.

Impact on users and redistributors of CPython nightly builds

Despite the difficulties of doing so, there are already some users and redistributors that take on the challenge of using or publishing the CPython master branch directly. Neither PEP 602 nor PEP 605 should directly affect this group, but the rolling release stream proposal in PEP 605 aims to lower the barriers to more users adopting this style of usage, by allowing them to adopt the tested rolling beta stream, rather than needing to use the master branch directly.

Impact on maintainers of third party libraries

For maintainers of third party libraries, the key source of support complexity is the number of different Python versions in widespread use.

PEP 602 aims to mitigate the potential negative impact on members of this group by keeping the minimum gap between full releases to 12 months.

PEP 605 aims to mitigate the potential negative impact on members of this group by increasing the gap between full releases to 24 months, retaining the current policy of moving each release branch to security-fix-only mode not long after its successor is released, and retaining the "beta" naming scheme for the new rolling release stream (at least for the Python 3.9 release cycle).

Full discussion of the impact on this group is left to the individual PEPs.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 72 coding: utf-8 End:
PEP: 310
Title: Reliable Acquisition/Release Pairs
Version: $Revision$
Last-Modified: $Date$
Author: Michael Hudson <[email protected]>, Paul Moore <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Dec-2002
Python-Version: 2.4
Post-History:

Abstract

It would be nice to have a less typing-intense way of writing:

    the_lock.acquire()
    try:
        ....
    finally:
        the_lock.release()

This PEP proposes a piece of syntax (a 'with' block) and a "small-i" interface that generalizes the above.

Pronouncement

This PEP is rejected in favor of PEP 343.

Rationale

One of the advantages of Python's exception handling philosophy is that it makes it harder to do the "wrong" thing (e.g. failing to check the return value of some system call). Currently, this does not apply to resource cleanup. The current syntax for acquisition and release of a resource (for example, a lock) is:

    the_lock.acquire()
    try:
        ....
    finally:
        the_lock.release()

This syntax separates the acquisition and release by a (possibly large) block of code, which makes it difficult to confirm "at a glance" that the code manages the resource correctly. Another common error is to code the "acquire" call within the try block, which incorrectly releases the lock if the acquire fails.

Basic Syntax and Semantics

The syntax of a 'with' statement is as follows:

    'with' [ var '=' ] expr ':'
        suite

This statement is defined as being equivalent to the following sequence of statements:

    var = expr

    if hasattr(var, "__enter__"):
        var.__enter__()

    try:
        suite
    finally:
        var.__exit__()

(Unlike that of __enter__, the presence of an __exit__ method is not checked, to ensure that using inappropriate objects in with: statements gives an error.)

If the variable is omitted, an unnamed object is allocated on the stack. In that case, the suite has no access to the unnamed object.

Possible Extensions

A number of potential extensions to the basic syntax have been discussed on the Python Developers list. None of these extensions are included in the solution proposed by this PEP. In many cases, the arguments are nearly equally strong in both directions. In such cases, the PEP has always chosen simplicity, simply because where extra power is needed, the existing try block is available.

Multiple expressions

One proposal was for allowing multiple expressions within one 'with' statement. The __enter__ methods would be called left to right, and the __exit__ methods right to left. The advantage of doing so is that where more than one resource is being managed, nested 'with' statements will result in code drifting towards the right margin. The solution to this problem is the same as for any other deep nesting - factor out some of the code into a separate function. Furthermore, the question of what happens if one of the __exit__ methods raises an exception (should the other __exit__ methods be called?) needs to be addressed.

Exception handling

An extension to the protocol to include an optional __except__ handler, which is called when an exception is raised, and which can handle or re-raise the exception, has been suggested. It is not at all clear that the semantics of this extension can be made precise and understandable. For example, should the equivalent code be try ... except ... else if an exception handler is defined, and try ... finally if not? How can this be determined at compile time, in general? The alternative is to define the code as expanding to a try ... except inside a try ... finally. But this may not do the right thing in real life.
The only use case identified for exception handling is with transactional processing (commit on a clean finish, and rollback on an exception). This is probably just as easy to handle with a conventional try ... except ... else block, and so the PEP does not include any support for exception handlers.

Implementation Notes

There is a potential race condition in the code specified as equivalent to the with statement. For example, if a KeyboardInterrupt exception is raised between the completion of the __enter__ method call and the start of the try block, the __exit__ method will not be called. This can lead to resource leaks, or to deadlocks. [XXX Guido has stated that he cares about this sort of race condition, and intends to write some C magic to handle them. The implementation of the 'with' statement should copy this.]

Open Issues

Should existing classes (for example, file-like objects and locks) gain appropriate __enter__ and __exit__ methods? The obvious reason in favour is convenience (no adapter needed). The argument against is that if built-in files have this but (say) StringIO does not, then code that uses "with" on a file object can't be reused with a StringIO object. So __exit__ = close becomes a part of the "file-like object" protocol, which user-defined classes may need to support.

The __enter__ hook may be unnecessary - for many use cases, an adapter class is needed and in that case, the work done by the __enter__ hook can just as easily be done in the __init__ hook. If a way of controlling object lifetimes explicitly was available, the function of the __exit__ hook could be taken over by the existing __del__ hook. An email exchange[1] with a proponent of this approach left one of the authors even more convinced that it isn't the right idea...

It has been suggested[2] that the "__exit__" method be called "close", or that a "close" method should be considered if no __exit__ method is found, to increase the "out-of-the-box utility" of the "with ..." construct.

There are some similarities in concept between 'with ...' blocks and generators, which have led to proposals that for loops could implement the with block functionality[3]. While neat on some levels, we think that for loops should stick to being loops.

Alternative Ideas

IEXEC: Holger Krekel -- generalised approach with XML-like syntax (no URL found...). Holger has much more far-reaching ideas about "execution monitors" that are informed about details of control flow in the monitored block. While interesting, these ideas could change the language in deep and subtle ways and as such belong to a different PEP.

Any Smalltalk/Ruby anonymous block style extension obviously subsumes this one.

PEP 319 is in the same area, but did not win support when aired on python-dev.

Backwards Compatibility

This PEP proposes a new keyword, so the __future__ game will need to be played.

Cost of Adoption

Those who claim the language is getting larger and more complicated have something else to complain about. It's something else to teach.

For the proposal to be useful, many file-like and lock-like classes in the standard library and other code will have to have:

    __exit__ = close

or similar added.

Cost of Non-Adoption

Writing correct code continues to be more effort than writing incorrect code.

References

There are various python-list and python-dev discussions that could be mentioned here.

Copyright

This document has been placed in the public domain.
References

There are various python-list and python-dev discussions that could be
mentioned here.

[1] Off-list conversation between Michael Hudson and Bill Soudan (made
    public with permission)
    http://starship.python.net/crew/mwh/pep310/

[2] Samuele Pedroni on python-dev
    https://mail.python.org/pipermail/python-dev/2003-August/037795.html

[3] Thread on python-dev with subject "[Python-Dev] pre-PEP:
    Resource-Release Support for Generators" starting at
    https://mail.python.org/pipermail/python-dev/2003-August/037803.html

Copyright

This document has been placed in the public domain.
PEP: 755
Title: Implicit namespace policy for PyPI
Author: Ofek Lev <[email protected]>
Sponsor: Barry Warsaw <[email protected]>
PEP-Delegate: Dustin Ingram <[email protected]>
Discussions-To: https://discuss.python.org/t/63191
Status: Draft
Type: Process
Topic: Packaging
Created: 05-Sep-2024
Post-History: 07-Sep-2024

Abstract

This PEP codifies an implementation of PEP 752 for PyPI[1].

Motivation

Many projects and communities would benefit from the ability to reserve
namespaces. Since PyPI exists to serve the Python community, it is
critical to gather feedback to ensure that everyone's needs are met.

A dedicated PEP is required because the operational and policy nuances
are up to each package repository to decide.

Rationale

PyPI has been understaffed, receiving the first dedicated specialist in
July 2024. Due to lack of resources, user support has been lacking for
package name claims, organization requests, storage limit increases,
and even account recovery.

The default policy of giving paid organizations more leniency when
reserving namespaces provides the following benefits:

- PyPI would have a constant source of funding for support specialists,
  infrastructure maintenance, bug fixes and new features.
- Although each application would require independent review, less
  human feedback would be required because the process to approve a
  paid organization already bestows a certain amount of trust.

Terminology

Paid/Corporate Organization
    Corporate organizations are organizations (as defined in PEP 752)
    that pay for special functionality on PyPI. This PEP refers to them
    as paid in most circumstances for brevity and to ease understanding
    for non-native speakers.

Root Grant
    A grant as defined by PEP 752 terminology.

Child Grant
    A grant created from a root grant with the associated namespace
    being a child namespace as defined by PEP 752 terminology.

Implementation

Grant Applications

Submission

Only organization (non-user) accounts have access to the grant
application form.

Applications for paid organizations receive priority in the reviewing
queue. This is both to offer a meaningful benefit to paid organizations
and to ensure that funding is available for PyPI's operational costs,
including more reviewers.

Approval Criteria

1. The namespace must not be something common like tool or apps.
2. The namespace should be longer than three characters.
3. The namespace should properly and clearly identify the reservation
   owner.
4. The organization should be actively using the namespace.
5. There should be evidence that not reserving the namespace may cause
   ambiguity, confusion, or other harm to the community.

Organizations that are not paid organizations will represent one of the
following:

- Large, popular open-source projects with many packages
- Universities that actively publish packages
- Government organizations that actively publish packages
- NPOs/NGOs that actively publish packages like Our World in Data

Generally speaking, reviewers should be more tolerant of paid
organizations that apply for grants that they are not yet using.

For example, while it's reasonable to grant a namespace to a startup or
an existing company with a new product line, it's not as reasonable to
grant a namespace to a community project that doesn't have many users.

Rejections

Rejected applications will receive clear rationale for the decision
based on the approval criteria.
Applications rejected due to the namespace being too common will be
persisted internally for future reviewers to reference, and new
applications attempting to reserve a namespace that was previously
rejected for that reason will display a warning.

Acceptance

When an application is accepted for a namespace that is used by
projects outside of the organization, an email will be sent to the
owners of the projects notifying them of the new grant. The email will
contain a link to the namespace's page.

Grant Types

There are two types of grants.

Root Grant

An organization gets a root grant for every approved application. This
grant may produce any number of child grants.

Child Grant

A child grant may be created by the owner of a root grant at any time
without approval. The namespace associated with such grants must be a
child namespace of the root grant's namespace.

Child grants cannot have their own child grants.

Grant Ownership

The owner of a grant may allow any number of other organizations to use
the grant. The grants behave as if they were owned by the organization.
The owner may revoke this permission at any time.

The owner may transfer ownership to another organization at any time
without approval from PyPI admins. If the organization is a paid
organization, the target for transfer must also be a paid organization.
Settings for permitted organizations are transferred as well.

User Interface

Namespace Page

The namespace of every active grant will have its own page that has
information such as its open status (see PEP 752's "open namespaces"),
the current owners, the time at which ownership was granted and the
total number of projects that match the namespace.

Project Page

Every project's page (example) that matches an active namespace grant
will indicate what the prefix is (NuGet currently does not do this),
and the prefix will stand out as a pill or label. This value will match
the prefix key in the namespace detail API of PEP 752. Clicking on the
namespace will take the user to its page.

Visual Indicators

For projects that match an active namespace grant, users will be able
to quickly ascertain which of the following scenarios apply:

1. Projects that are tied to a grant owner will not have a visual
   indicator and users should solely rely on the always-present prefix.
2. Projects that are not tied to a grant owner, where the matching
   grant is open, will have a unique indicator that does not convey
   mistrust or danger. A good choice might be the users icon from Font
   Awesome or the groups icon from Google Fonts.
3. Projects that are not tied to a grant owner, where the matching
   grant is restricted, will have a unique visual indicator. This
   situation arises when the project existed before the grant was
   created. The indicator will convey inauthenticity or lack of trust.
   A good choice might be a warning sign (⚠).

Open Namespaces

When a child grant is created, its open status will be inherited from
the root grant. Owners of child grants may make them open at any time.
If a grant is open, it cannot be made restricted unless the owner of
the grant is the owner of every project that matches the namespace.

Grant Removal

If a grant is shared with other organizations, the owner organization
must initiate a transfer as a prerequisite for organization deletion.

If a grant is not shared, the owner may unclaim the namespace in either
of the following circumstances:

- The organization manually removes themselves as the owner.
- The organization is deleted.
When a reserved namespace becomes unclaimed, the UI will reflect this
such that matching projects will no longer have any indicators on their
page, nor will the namespace have a dedicated page.

How to Teach This

For organizations, we will document how to reserve namespaces, what the
benefits are, and pricing. We will document PEP 541 on the same pages
so that organizations are aware of the main mechanism to report
improper uses of existing packages matching their grants.

Rejected Ideas

Page for Viewing All Active Grants

There is no page to view all active namespace grants because this has
the potential to leak private information such as upcoming products.

Visual Indicator for Owned Projects

There is no indicator for projects that are tied to a grant owner,
primarily to reduce clutter, especially since this is the most common
scenario. If there were an indicator, it would not be a check mark or
similar, as NuGet chose, because it may mistakenly convey that there
are associated security guarantees inherent to the use of the package.
Additionally, some social media platforms use a check mark for verified
users, which may cause confusion.

References

[1] The Python Package Index (https://pypi.org)

Copyright

This document is placed in the public domain or under the
CC0-1.0-Universal license, whichever is more permissive.
PEP: 376
Title: Database of Installed Python Distributions
Author: Tarek Ziadé <[email protected]>
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 22-Feb-2009
Python-Version: 2.7, 3.2
Post-History: 22-Jun-2009

Abstract

The goal of this PEP is to provide a standard infrastructure to manage
project distributions installed on a system, so all tools that are
installing or removing projects are interoperable.

To achieve this goal, the PEP proposes a new format to describe
installed distributions on a system. It also describes a reference
implementation for the standard library.

In the past an attempt was made to create an installation database (see
PEP 262). Combined with PEP 345, the current proposal supersedes PEP
262.

Note: the implementation plan didn't go as expected, so it should be
considered informative only for this PEP.

Rationale

There are two problems right now in the way distributions are installed
in Python:

- There are too many ways to do it and this makes interoperation
  difficult.
- There is no API to get information on installed distributions.

How distributions are installed

Right now, when a distribution is installed in Python, every element
can be installed in a different directory.

For instance, Distutils installs the pure Python code in the purelib
directory, which is lib/python2.6/site-packages for unix-like systems
and Mac OS X, or Lib\site-packages under Python's installation
directory for Windows.

Additionally, the install_egg_info subcommand of the Distutils install
command adds an .egg-info file for the project into the purelib
directory.

For example, for the docutils distribution, which contains one package,
an extra module, and executable scripts, three elements are installed
in site-packages:

- docutils: The docutils package.
- roman.py: An extra module used by docutils.
- docutils-0.5-py2.6.egg-info: A file containing the distribution
  metadata as described in PEP 314. This file corresponds to the file
  called PKG-INFO, built by the sdist command.

Some executable scripts, such as rst2html.py, are also added in the bin
directory of the Python installation.

Another project called setuptools[1] has two other formats to install
distributions, called EggFormats[2]:

- a self-contained .egg directory, that contains all the distribution
  files and the distribution metadata in a file called PKG-INFO in a
  subdirectory called EGG-INFO. setuptools creates other files in that
  directory that can be considered as complementary metadata.
- an .egg-info directory installed in site-packages, that contains the
  same files EGG-INFO has in the .egg format.

The first format is automatically used when you install a distribution
that uses the setuptools.setup function in its setup.py file, instead
of the distutils.core.setup one. setuptools also adds a reference to
the distribution into an easy-install.pth file.

Last, the setuptools project provides an executable script called
easy_install[3] that installs all distributions, including
distutils-based ones, in self-contained .egg directories.

If you want to have standalone .egg-info directories for your
distributions, i.e. the second setuptools format, you have to force it
when you work with a setuptools-based distribution or with the
easy_install script. You can force it by using the
--single-version-externally-managed option or the --root option. This
will make the setuptools project install the project like distutils
does.
This option is used by:

- the pip[4] installer
- the Fedora packagers[5]
- the Debian packagers[6]

Uninstall information

Distutils doesn't provide an uninstall command. If you want to
uninstall a distribution, you have to be a power user and remove the
various elements that were installed, and then look over the .pth file
to clean them if necessary. And the process differs depending on the
tools you have used to install the distribution and on whether the
distribution's setup.py uses Distutils or Setuptools.

Under some circumstances, you might not be able to know for sure that
you have removed everything, or that you didn't break another
distribution by removing a file that is shared among several
distributions.

But there's a common behavior: when you install a distribution, files
are copied in your system. And it's possible to keep track of these
files for later removal.

Moreover, the Pip project has gained an uninstall feature lately. It
records all installed files, using the record option of the install
command.

What this PEP proposes

To address those issues, this PEP proposes a few changes:

- A new .dist-info structure using a directory, inspired by one format
  of the EggFormats standard from setuptools.
- New APIs in pkgutil to be able to query the information of installed
  distributions.
- An uninstall function and an uninstall script in Distutils.

One .dist-info directory per installed distribution

This PEP proposes an installation format inspired by one of the options
in the EggFormats standard, the one that uses a distinct directory
located in the site-packages directory. This distinct directory is
named as follows:

    name + '-' + version + '.dist-info'

This .dist-info directory can contain these files:

- METADATA: contains metadata, as described in PEP 345, PEP 314 and
  PEP 241.
- RECORD: records the list of installed files.
- INSTALLER: records the name of the tool used to install the project.
- REQUESTED: the presence of this file indicates that the project
  installation was explicitly requested (i.e., not installed as a
  dependency).

The METADATA, RECORD and INSTALLER files are mandatory, while REQUESTED
may be missing.

This proposal will not impact Python itself because the metadata files
are not used anywhere yet in the standard library besides Distutils.

It will impact the setuptools and pip projects but, given the fact that
they already work with a directory that contains a PKG-INFO file, the
change will have no deep consequences.

RECORD

A RECORD file is added inside the .dist-info directory at installation
time when installing a source distribution using the install command.
Notice that when installing a binary distribution created with the
bdist command or a bdist-based command, the RECORD file will be
installed as well since these commands use the install command to
create binary distributions.

The RECORD file holds the list of installed files. These correspond to
the files listed by the record option of the install command, and will
be generated by default. This allows the implementation of an
uninstallation feature, as explained later in this PEP. The install
command also provides an option to prevent the RECORD file from being
written, and this option should be used when creating system packages.

Third-party installation tools also should not overwrite or delete
files that are not in a RECORD file without prompting or warning.

This RECORD file is inspired from PEP 262 FILES.

The RECORD file is a CSV file, composed of records, one line per
installed file.
The csv module is used to read the file, with these options:

- field delimiter: ,
- quoting char: "
- line terminator: os.linesep (so \r\n or \n)

When a distribution is installed, files can be installed under:

- the base location: path defined by the --install-lib option, which
  defaults to the site-packages directory.
- the installation prefix: path defined by the --prefix option, which
  defaults to sys.prefix.
- any other path on the system.

Each record is composed of three elements:

- the file's path

  - a '/'-separated path, relative to the base location, if the file is
    under the base location.
  - a '/'-separated path, relative to the base location, if the file is
    under the installation prefix AND if the base location is a subpath
    of the installation prefix.
  - an absolute path, using the local platform separator

- a hash of the file's contents.

  Notice that pyc and pyo generated files don't have any hash because
  they are automatically produced from py files. So checking the hash
  of the corresponding py file is enough to decide if the file and its
  associated pyc or pyo files have changed.

  The hash is either the empty string or the hash algorithm as named in
  hashlib.algorithms_guaranteed, followed by the equals character =,
  followed by the urlsafe-base64-nopad encoding of the digest
  (base64.urlsafe_b64encode(digest) with trailing = removed).

- the file's size in bytes

The csv module is used to generate this file, so the field separator is
",". Any "," character found within a field is escaped automatically by
csv.

When the file is read, the U option is used so the universal newline
support (see PEP 278) is activated, avoiding any trouble reading a file
produced on a platform that uses a different new line terminator.

Here's an example of a RECORD file (extract):

    lib/python2.6/site-packages/docutils/__init__.py,md5=nWt-Dge1eug4iAgqLS_uWg,9544
    lib/python2.6/site-packages/docutils/__init__.pyc,,
    lib/python2.6/site-packages/docutils/core.py,md5=X90C_JLIcC78PL74iuhPnA,66188
    lib/python2.6/site-packages/docutils/core.pyc,,
    lib/python2.6/site-packages/roman.py,md5=7YhfNczihNjOY0FXlupwBg,234
    lib/python2.6/site-packages/roman.pyc,,
    /usr/local/bin/rst2html.py,md5=g22D3amDLJP-FhBzCi7EvA,234
    /usr/local/bin/rst2html.pyc,,
    python2.6/site-packages/docutils-0.5.dist-info/METADATA,md5=ovJyUNzXdArGfmVyb0onyA,195
    lib/python2.6/site-packages/docutils-0.5.dist-info/RECORD,,

(Notice that the RECORD file can't contain a hash of itself; it is just
mentioned here.)

A project that installs a config.ini file in /etc/myapp will be added
like this:

    /etc/myapp/config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544

For a windows platform, the drive letter is added for the absolute
paths, so a file that is copied in c:\MyApp\ will be:

    c:\etc\myapp\config.ini,md5=gLfd6IANquzGLhOkW4Mfgg,9544

INSTALLER

The install command has a new option called installer. This option is
the name of the tool used to invoke the installation. It's a normalized
lower-case string matching [a-z0-9_\-\.]:

    $ python setup.py install --installer=pkg-system

It defaults to distutils if not provided.

When a distribution is installed, the INSTALLER file is generated in
the .dist-info directory with this value, to keep track of who
installed the distribution. The file is a single-line text file.
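Pulling the RECORD details above together, here is a small sketch (my
illustration, not code from the PEP; the file names are hypothetical)
that builds one RECORD row with the described hash encoding and writes
it with the csv options the PEP specifies:

    # Sketch: build one RECORD row (path, "alg=b64digest", size) and
    # write it with the csv options given above.
    import base64
    import csv
    import hashlib
    import os

    def record_row(path):
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        # urlsafe base64 with the trailing '=' padding removed
        b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
        return [path, "sha256=" + b64, str(os.path.getsize(path))]

    with open("RECORD", "w", newline="") as f:
        writer = csv.writer(f, delimiter=",", quotechar='"',
                            lineterminator=os.linesep)
        writer.writerow(record_row("docutils/__init__.py"))  # hypothetical
        writer.writerow(["docutils/__init__.pyc", "", ""])   # no hash for pyc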
REQUESTED

Some install tools automatically detect unfulfilled dependencies and
install them. In these cases, it is useful to track which distributions
were installed purely as a dependency, so if their dependent
distribution is later uninstalled, the user can be alerted of the
orphaned dependency.

If a distribution is installed by direct user request (the usual case),
a file REQUESTED is added to the .dist-info directory of the installed
distribution. The REQUESTED file may be empty, or may contain a marker
comment line beginning with the "#" character.

If an install tool installs a distribution automatically, as a
dependency of another distribution, the REQUESTED file should not be
created.

The install command of distutils by default creates the REQUESTED file.
It accepts --requested and --no-requested options to explicitly specify
whether the file is created.

If a distribution that was already installed on the system as a
dependency is later installed by name, the distutils install command
will create the REQUESTED file in the .dist-info directory of the
existing installation.

Implementation details

Note: this section is non-normative. In the end, this PEP was
implemented by third-party libraries and tools, not the standard
library.

New functions and classes in pkgutil

To use the .dist-info directory content, we need to add in the standard
library a set of APIs. The best place to put these APIs is pkgutil.

Functions

The new functions added in the pkgutil module are:

- distinfo_dirname(name, version) -> directory name

  name is converted to a standard distribution name by replacing any
  runs of non-alphanumeric characters with a single '-'.

  version is converted to a standard version string. Spaces become
  dots, and all other non-alphanumeric characters (except dots) become
  dashes, with runs of multiple dashes condensed to a single dash.

  Both attributes are then converted into their filename-escaped form,
  i.e. any '-' characters are replaced with '_' other than the one in
  'dist-info' and the one separating the name from the version number.
  (A sketch of these rules is given after this list.)

- get_distributions() -> iterator of Distribution instances.

  Provides an iterator that looks for .dist-info directories in
  sys.path and returns Distribution instances for each one of them.

- get_distribution(name) -> Distribution or None.

  Scans all elements in sys.path and looks for all directories ending
  with .dist-info. Returns a Distribution corresponding to the
  .dist-info directory that contains a METADATA file that matches name
  for the name metadata. This function only returns the first result
  found, since no more than one value is expected. If the directory is
  not found, returns None.

- obsoletes_distribution(name, version=None) -> iterator of
  Distribution instances.

  Iterates over all distributions to find which distributions obsolete
  name. If a version is provided, it will be used to filter the
  results.

- provides_distribution(name, version=None) -> iterator of Distribution
  instances.

  Iterates over all distributions to find which distributions provide
  name. If a version is provided, it will be used to filter the
  results.

- get_file_users(path) -> iterator of Distribution instances.

  Iterates over all distributions to find out which distributions use
  path. path can be a local absolute path or a relative '/'-separated
  path. A local absolute path is an absolute path in which occurrences
  of '/' have been replaced by the system separator given by os.sep.
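As promised above, here is a rough sketch of the distinfo_dirname()
normalization rules (my reading of the rules, not the PEP's reference
code):

    # Sketch of the distinfo_dirname() rules described above.
    import re

    def distinfo_dirname(name, version):
        # runs of non-alphanumeric characters in the name -> a single '-'
        name = re.sub(r"[^A-Za-z0-9]+", "-", name)
        # version: spaces -> dots; other non-alphanumerics (except dots)
        # -> dashes, with runs of dashes condensed
        version = re.sub(r"[^A-Za-z0-9.]+", "-", version.replace(" ", "."))
        # filename-escape: remaining '-' characters become '_'
        return "%s-%s.dist-info" % (name.replace("-", "_"),
                                    version.replace("-", "_"))

    assert distinfo_dirname("python-ldap", "2.5 a---5") == \
        "python_ldap-2.5.a_5.dist-info"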
Distribution class

A new class called Distribution is created with the path of the
.dist-info directory provided to the constructor. It reads the metadata
contained in METADATA when it is instantiated.

Distribution(path) -> instance

    Creates a Distribution instance for the given path.

Distribution provides the following attributes:

- name: The name of the distribution.
- metadata: A DistributionMetadata instance loaded with the
  distribution's METADATA file.
- requested: A boolean that indicates whether the REQUESTED metadata
  file is present (in other words, whether the distribution was
  installed by user request).

And the following methods:

- get_installed_files(local=False) -> iterator of (path, hash, size)

  Iterates over the RECORD entries and returns a tuple (path, hash,
  size) for each line. If local is True, the path is transformed into a
  local absolute path. Otherwise the raw value from RECORD is returned.

  A local absolute path is an absolute path in which occurrences of '/'
  have been replaced by the system separator given by os.sep.

- uses(path) -> Boolean

  Returns True if path is listed in RECORD. path can be a local
  absolute path or a relative '/'-separated path.

- get_distinfo_file(path, binary=False) -> file object

  Returns a file located under the .dist-info directory.

  Returns a file instance for the file pointed to by path. path has to
  be a '/'-separated path relative to the .dist-info directory or an
  absolute path. If path is an absolute path and doesn't start with the
  .dist-info directory path, a DistutilsError is raised.

  If binary is True, opens the file in read-only binary mode (rb),
  otherwise opens it in read-only mode (r).

- get_distinfo_files(local=False) -> iterator of paths

  Iterates over the RECORD entries and returns paths for each line if
  the path is pointing to a file located in the .dist-info directory or
  one of its subdirectories. If local is True, each path is transformed
  into a local absolute path. Otherwise the raw value from RECORD is
  returned.

Notice that the API is organized in five classes that work with
directories and Zip files (so it works with files included in Zip
files, see PEP 273 for more details). These classes are described in
the documentation of the prototype implementation for interested
readers[7].

Examples

Let's use some of the new APIs with our docutils example:

    >>> from pkgutil import get_distribution, get_file_users, distinfo_dirname
    >>> dist = get_distribution('docutils')
    >>> dist.name
    'docutils'
    >>> dist.metadata.version
    '0.5'

    >>> distinfo_dirname('docutils', '0.5')
    'docutils-0.5.dist-info'
    >>> distinfo_dirname('python-ldap', '2.5')
    'python_ldap-2.5.dist-info'
    >>> distinfo_dirname('python-ldap', '2.5 a---5')
    'python_ldap-2.5.a_5.dist-info'

    >>> for path, hash, size in dist.get_installed_files():
    ...     print '%s %s %d' % (path, hash, size)
    ...
    python2.6/site-packages/docutils/__init__.py,b690274f621402dda63bf11ba5373bf2,9544
    python2.6/site-packages/docutils/core.py,9c4b84aff68aa55f2e9bf70481b94333,66188
    python2.6/site-packages/roman.py,a4b84aff68aa55f2e9bf70481b943D3,234
    /usr/local/bin/rst2html.py,a4b84aff68aa55f2e9bf70481b943D3,234
    python2.6/site-packages/docutils-0.5.dist-info/METADATA,6fe57de576d749536082d8e205b77748,195
    python2.6/site-packages/docutils-0.5.dist-info/RECORD

    >>> dist.uses('docutils/core.py')
    True
    >>> dist.uses('/usr/local/bin/rst2html.py')
    True
    >>> dist.get_distinfo_file('METADATA')
    <open file at ...>
    >>> dist.requested
    True

New functions in Distutils

Distutils already provides a very basic way to install a distribution,
which is running the install command over the setup.py script of the
distribution.
Distutils2 will provide a very basic uninstall function, that is added
in distutils2.util and takes the name of the distribution to uninstall
as its argument. uninstall uses the APIs described earlier and removes
all unique files, as long as their hash didn't change. Then it removes
empty directories left behind.

uninstall returns a list of uninstalled files:

    >>> from distutils2.util import uninstall
    >>> uninstall('docutils')
    ['/opt/local/lib/python2.6/site-packages/docutils/core.py',
     ...
     '/opt/local/lib/python2.6/site-packages/docutils/__init__.py']

If the distribution is not found, a DistutilsUninstallError is raised.

Filtering

To make it a reference API for third-party projects that wish to
control how uninstall works, a second callable argument can be used.
It's called for each file that is removed. If the callable returns
True, the file is removed. If it returns False, it's left alone.

Examples:

    >>> def _remove_and_log(path):
    ...     logging.info('Removing %s' % path)
    ...     return True
    ...
    >>> uninstall('docutils', _remove_and_log)

    >>> def _dry_run(path):
    ...     logging.info('Removing %s (dry run)' % path)
    ...     return False
    ...
    >>> uninstall('docutils', _dry_run)

Of course, a third-party tool can use lower-level pkgutil APIs to
implement its own uninstall feature.

Installer marker

As explained earlier in this PEP, the install command adds an INSTALLER
file in the .dist-info directory with the name of the installer.

To avoid removing distributions that were installed by another
packaging system, the uninstall function takes an extra argument
installer which defaults to distutils2.

When called, uninstall checks that the INSTALLER file matches this
argument. If not, it raises a DistutilsUninstallError:

    >>> uninstall('docutils')
    Traceback (most recent call last):
    ...
    DistutilsUninstallError: docutils was installed by 'cool-pkg-manager'

    >>> uninstall('docutils', installer='cool-pkg-manager')

This allows a third-party application to use the uninstall function and
strongly suggest that no other program remove a distribution it has
previously installed. This is useful when a third-party program that
relies on Distutils APIs does extra steps on the system at installation
time, which it has to undo at uninstallation time.

Adding an Uninstall script

An uninstall script is added in Distutils2 and is used like this:

    $ python -m distutils2.uninstall projectname

Notice that the script doesn't check whether the removal of a
distribution breaks another distribution, although it does make sure,
by using the uninstall function, that all the files it removes are not
used by any other distribution.

Also note that this uninstall script pays no attention to the REQUESTED
metadata; that is provided only for use by external tools to provide
more advanced dependency management.

Backward compatibility and roadmap

These changes don't introduce any compatibility problems since they
will be implemented in:

- pkgutil in new functions
- distutils2

The plan is to include the functionality outlined in this PEP in
pkgutil for Python 3.2, and in Distutils2. Distutils2 will also contain
a backport of the new pkgutil, and can be used for 2.4 onward.

Distributions installed using existing, pre-standardization formats do
not have the necessary metadata available for the new API, and thus
will be ignored. Third-party tools may of course continue to support
previous formats in addition to the new format, in order to ease the
transition.
References

[1] http://peak.telecommunity.com/DevCenter/setuptools
[2] http://peak.telecommunity.com/DevCenter/EggFormats
[3] http://peak.telecommunity.com/DevCenter/EasyInstall
[4] http://pypi.python.org/pypi/pip
[5] http://fedoraproject.org/wiki/Packaging/Python/Eggs#Providing_Eggs_using_Setuptools
[6] http://wiki.debian.org/DebianPython/NewPolicy
[7] http://bitbucket.org/tarek/pep376/

Acknowledgements

Jim Fulton, Ian Bicking, Phillip Eby, Rafael Villar Burke, and many
people at Pycon and Distutils-SIG.

Copyright

This document has been placed in the public domain.
PEP: 286
Title: Enhanced Argument Tuples
Author: Martin von Löwis <[email protected]>
Status: Deferred
Type: Standards Track
Content-Type: text/x-rst
Created: 03-Mar-2002
Python-Version: 2.3
Post-History:

Abstract

PyArg_ParseTuple is confronted with difficult memory management if an
argument converter creates new memory. To deal with these cases, a
specialized argument type is proposed.

PEP Deferral

Further exploration of the concepts covered in this PEP has been
deferred for lack of a current champion interested in promoting the
goals of the PEP and collecting and incorporating feedback, and with
sufficient available time to do so effectively.

The resolution of this PEP may also be affected by the resolution of
PEP 426, which proposes the use of a preprocessing step to generate
some aspects of C API interface code.

Problem description

Today, argument tuples keep references to the function arguments, which
are guaranteed to live as long as the argument tuple exists, which is
at least as long as the function call is being executed.

In some cases, parsing an argument will allocate new memory, which is
then to be released by the caller. This has two problems:

1. In case of failure, the application cannot know what memory to
   release; most callers don't even know that they have the
   responsibility to release that memory. Examples of this are the N
   converter (bug #416288[1]) and the es# converter (bug #501716[2]).
2. Even for successful argument parsing, it is still inconvenient for
   the caller to be responsible for releasing the memory. In some
   cases, this is unnecessarily inefficient. For example, the es
   converter copies the conversion result into memory, even though
   there already is a string object that has the right contents.

Proposed solution

A new type 'argument tuple' is introduced. This type derives from
tuple, adding a __dict__ member (at tp_dictoffset -4). Instances of
this type might get the following attributes:

- 'failobjects', a list of objects which need to be deallocated in case
  of failure
- 'okobjects', a list of objects which will be released when the
  argument tuple is released

To manage this type, the following functions will be added, and used
appropriately in ceval.c and getargs.c:

- PyArgTuple_New(int);
- PyArgTuple_AddFailObject(PyObject*, PyObject*);
- PyArgTuple_AddFailMemory(PyObject*, void*);
- PyArgTuple_AddOkObject(PyObject*, PyObject*);
- PyArgTuple_AddOkMemory(PyObject*, void*);
- PyArgTuple_ClearFailed(PyObject*);

When argument parsing fails, all fail objects will be released through
Py_DECREF, and all fail memory will be released through PyMem_Free. If
parsing succeeds, the references to the fail objects and fail memory
are dropped, without releasing anything.

When the argument tuple is released, all ok objects and memory will be
released.

If those functions are called with an object of a different type, a
warning is issued and no further action is taken; usage of the affected
converters without using argument tuples is deprecated.

Affected converters

The following converters will add fail memory and fail objects: N, es,
et, es#, et# (unless memory is passed into the converter).

New converters

To simplify Unicode conversion, the e* converters are duplicated as E*
converters (Es, Et, Es#, Et#). The usage of the E* converters is
identical to that of the e* converters, except that the application
will not need to manage the resulting memory. This will be implemented
through registration of Ok objects with the argument tuple. The e*
converters are deprecated.
References

[1] infrequent memory leak in pyexpat
    (http://bugs.python.org/issue416288)

[2] "es#" parser marker leaks memory
    (http://bugs.python.org/issue501716)

Copyright

This document has been placed in the public domain.
PEP: 402
Title: Simplified Package Layout and Partitioning
Author: Phillip J. Eby
Status: Rejected
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 12-Jul-2011
Python-Version: 3.3
Post-History: 20-Jul-2011
Replaces: 382

Rejection Notice

On the first day of sprints at US PyCon 2012 we had a long and fruitful
discussion about PEP 382 and PEP 402. We ended up rejecting both but a
new PEP will be written to carry on in the spirit of PEP 402. Martin
von Löwis wrote up a summary[1].

Abstract

This PEP proposes an enhancement to Python's package importing to:

- Surprise users of other languages less,
- Make it easier to convert a module into a package, and
- Support dividing packages into separately installed components (ala
  "namespace packages", as described in PEP 382)

The proposed enhancements do not change the semantics of any
currently-importable directory layouts, but make it possible for
packages to use a simplified directory layout (that is not importable
currently).

However, the proposed changes do NOT add any performance overhead to
the importing of existing modules or packages, and performance for the
new directory layout should be about the same as that of previous
"namespace package" solutions (such as pkgutil.extend_path()).

The Problem

    "Most packages are like modules. Their contents are highly
    interdependent and can't be pulled apart. [However,] some packages
    exist to provide a separate namespace. ... It should be possible to
    distribute sub-packages or submodules of these [namespace packages]
    independently."

    -- Jim Fulton, shortly before the release of Python 2.3[2]

When new users come to Python from other languages, they are often
confused by Python's package import semantics. At Google, for example,
Guido received complaints from "a large crowd with pitchforks"[3] that
the requirement for packages to contain an __init__ module was a
"misfeature", and should be dropped.

In addition, users coming from languages like Java or Perl are
sometimes confused by a difference in Python's import path searching.

In most other languages that have a similar path mechanism to Python's
sys.path, a package is merely a namespace that contains modules or
classes, and can thus be spread across multiple directories in the
language's path. In Perl, for instance, a Foo::Bar module will be
searched for in Foo/ subdirectories all along the module include path,
not just in the first such subdirectory found.

Worse, this is not just a problem for new users: it prevents anyone
from easily splitting a package into separately-installable components.
In Perl terms, it would be as if every possible Net:: module on CPAN
had to be bundled up and shipped in a single tarball!

For that reason, various workarounds for this latter limitation exist,
circulated under the term "namespace packages". The Python standard
library has provided one such workaround since Python 2.3 (via the
pkgutil.extend_path() function), and the "setuptools" package provides
another (via pkg_resources.declare_namespace()).

The workarounds themselves, however, fall prey to a third issue with
Python's way of laying out packages in the filesystem.

Because a package must contain an __init__ module, any attempt to
distribute modules for that package must necessarily include that
__init__ module, if those modules are to be importable.
However, the very fact that each distribution of modules for a package
must contain this (duplicated) __init__ module, means that OS vendors
who package up these module distributions must somehow handle the
conflict caused by several module distributions installing that
__init__ module to the same location in the filesystem.

This led to the proposing of PEP 382 ("Namespace Packages") - a way to
signal to Python's import machinery that a directory was importable,
using unique filenames per module distribution.

However, there was more than one downside to this approach. Performance
for all import operations would be affected, and the process of
designating a package became even more complex. New terminology had to
be invented to explain the solution, and so on.

As terminology discussions continued on the Import-SIG, it soon became
apparent that the main reason it was so difficult to explain the
concepts related to "namespace packages" was because Python's current
way of handling packages is somewhat underpowered, when compared to
other languages.

That is, in other popular languages with package systems, no special
term is needed to describe "namespace packages", because all packages
generally behave in the desired fashion.

Rather than being an isolated single directory with a special marker
module (as in Python), packages in other languages are typically just
the union of appropriately-named directories across the entire import
or inclusion path.

In Perl, for example, the module Foo is always found in a Foo.pm file,
and a module Foo::Bar is always found in a Foo/Bar.pm file. (In other
words, there is One Obvious Way to find the location of a particular
module.)

This is because Perl considers a module to be different from a package:
the package is purely a namespace in which other modules may reside,
and is only coincidentally the name of a module as well.

In current versions of Python, however, the module and the package are
more tightly bound together. Foo is always a module -- whether it is
found in Foo.py or Foo/__init__.py -- and it is tightly linked to its
submodules (if any), which must reside in the exact same directory
where the __init__.py was found.

On the positive side, this design choice means that a package is quite
self-contained, and can be installed, copied, etc. as a unit just by
performing an operation on the package's root directory.

On the negative side, however, it is non-intuitive for beginners, and
requires a more complex step to turn a module into a package. If Foo
begins its life as Foo.py, then it must be moved and renamed to
Foo/__init__.py.

Conversely, if you intend to create a Foo.Bar module from the start,
but have no particular module contents to put in Foo itself, then you
have to create an empty and seemingly-irrelevant Foo/__init__.py file,
just so that Foo.Bar can be imported.

(And these issues don't just confuse newcomers to the language, either:
they annoy many experienced developers as well.)

So, after some discussion on the Import-SIG, this PEP was created as an
alternative to PEP 382, in an attempt to solve all of the above
problems, not just the "namespace package" use cases. And, as a
delightful side effect, the solution proposed in this PEP does not
affect the import performance of ordinary modules or self-contained
(i.e. __init__-based) packages.

The Solution

In the past, various proposals have been made to allow more intuitive
approaches to package directory layout.
However, most of them failed because of an apparent
backward-compatibility problem.

That is, if the requirement for an __init__ module were simply dropped,
it would open up the possibility for a directory named, say, string on
sys.path, to block importing of the standard library string module.

Paradoxically, however, the failure of this approach does not arise
from the elimination of the __init__ requirement!

Rather, the failure arises because the underlying approach takes for
granted that a package is just ONE thing, instead of two.

In truth, a package comprises two separate, but related entities: a
module (with its own, optional contents), and a namespace where other
modules or packages can be found.

In current versions of Python, however, the module part (found in
__init__) and the namespace for submodule imports (represented by the
__path__ attribute) are both initialized at the same time, when the
package is first imported.

And, if you assume this is the only way to initialize these two things,
then there is no way to drop the need for an __init__ module, while
still being backwards-compatible with existing directory layouts.

After all, as soon as you encounter a directory on sys.path matching
the desired name, that means you've "found" the package, and must stop
searching, right? Well, not quite.

A Thought Experiment

Let's hop into the time machine for a moment, and pretend we're back in
the early 1990s, shortly before Python packages and __init__.py have
been invented. But, imagine that we are familiar with Perl-like package
imports, and we want to implement a similar system in Python.

We'd still have Python's module imports to build on, so we could
certainly conceive of having Foo.py as a parent Foo module for a Foo
package. But how would we implement submodule and subpackage imports?

Well, if we didn't have the idea of __path__ attributes yet, we'd
probably just search sys.path looking for Foo/Bar.py. But we'd only do
it when someone actually tried to import Foo.Bar. NOT when they
imported Foo.

And that lets us get rid of the backwards-compatibility problem of
dropping the __init__ requirement, back here in 2011.

How? Well, when we import Foo, we're not even looking for Foo/
directories on sys.path, because we don't care yet. The only point at
which we care, is the point when somebody tries to actually import a
submodule or subpackage of Foo.

That means that if Foo is a standard library module (for example), and
I happen to have a Foo directory on sys.path (without an __init__.py,
of course), then nothing breaks. The Foo module is still just a module,
and it's still imported normally.

Self-Contained vs. "Virtual" Packages

Of course, in today's Python, trying to import Foo.Bar will fail if Foo
is just a Foo.py module (and thus lacks a __path__ attribute).

So, this PEP proposes to dynamically create a __path__, in the case
where one is missing.

That is, if I try to import Foo.Bar the proposed change to the import
machinery will notice that the Foo module lacks a __path__, and will
therefore try to build one before proceeding.

And it will do this by making a list of all the existing Foo/
subdirectories of the directories listed in sys.path.

If the list is empty, the import will fail with ImportError, just like
today. But if the list is not empty, then it is saved in a new
Foo.__path__ attribute, making the module a "virtual package".

That is, because it now has a valid __path__, we can proceed to import
submodules or subpackages in the normal way.
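To make the mechanism just described concrete, here is a simplified
sketch - my illustration, not the PEP's reference code; the fuller,
importer-based algorithm appears under Virtual Paths below - of
building such a dynamic __path__ from sys.path:

    # Simplified sketch of dynamic __path__ construction: collect every
    # existing Foo/ subdirectory of the sys.path entries.
    import os
    import sys

    def build_virtual_path(name):
        path = [os.path.join(entry, name)
                for entry in sys.path
                if os.path.isdir(os.path.join(entry, name))]
        if not path:
            # no Foo/ directory anywhere on sys.path: fail as today
            raise ImportError("no virtual package %r" % name)
        return path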
Now, notice that this change does not affect "classic", self-contained
packages that have an __init__ module in them. Such packages already
have a __path__ attribute (initialized at import time) so the import
machinery won't try to create another one later.

This means that (for example) the standard library email package will
not be affected in any way by you having a bunch of unrelated
directories named email on sys.path. (Even if they contain *.py files.)

But it does mean that if you want to turn your Foo module into a Foo
package, all you have to do is add a Foo/ directory somewhere on
sys.path, and start adding modules to it.

But what if you only want a "namespace package"? That is, a package
that is only a namespace for various separately-distributed submodules
and subpackages?

For example, if you're Zope Corporation, distributing dozens of
separate tools like zc.buildout, each in packages under the zc
namespace, you don't want to have to make and include an empty zc.py in
every tool you ship. (And, if you're a Linux or other OS vendor, you
don't want to deal with the package installation conflicts created by
trying to install ten copies of zc.py to the same location!)

No problem. All we have to do is make one more minor tweak to the
import process: if the "classic" import process fails to find a
self-contained module or package (e.g., if import zc fails to find a
zc.py or zc/__init__.py), then we once more try to build a __path__ by
searching for all the zc/ directories on sys.path, and putting them in
a list.

If this list is empty, we raise ImportError. But if it's non-empty, we
create an empty zc module, and put the list in zc.__path__.
Congratulations: zc is now a namespace-only, "pure virtual" package! It
has no module contents, but you can still import submodules and
subpackages from it, regardless of where they're located on sys.path.

(By the way, both of these additions to the import protocol (i.e. the
dynamically-added __path__, and dynamically-created modules) apply
recursively to child packages, using the parent package's __path__ in
place of sys.path as a basis for generating a child __path__. This
means that self-contained and virtual packages can contain each other
without limitation, with the caveat that if you put a virtual package
inside a self-contained one, it's gonna have a really short __path__!)

Backwards Compatibility and Performance

Notice that these two changes only affect import operations that today
would result in ImportError. As a result, the performance of imports
that do not involve virtual packages is unaffected, and potential
backward compatibility issues are very restricted.

Today, if you try to import submodules or subpackages from a module
with no __path__, it's an immediate error. And of course, if you don't
have a zc.py or zc/__init__.py somewhere on sys.path today, import zc
would likewise fail.

Thus, the only potential backwards-compatibility issues are:

1. Tools that expect package directories to have an __init__ module,
   that expect directories without an __init__ module to be
   unimportable, or that expect __path__ attributes to be static, will
   not recognize virtual packages as packages.

   (In practice, this just means that tools will need updating to
   support virtual packages, e.g. by using pkgutil.walk_modules()
   instead of using hardcoded filesystem searches.)

2. Code that expects certain imports to fail may now do something
   unexpected.
   This should be fairly rare in practice, as most sane, non-test code
   does not import things that are expected not to exist!

The biggest likely exception to the above would be when a piece of code
tries to check whether some package is installed by importing it. If
this is done only by importing a top-level module (i.e., not checking
for a __version__ or some other attribute), and there is a directory of
the same name as the sought-for package on sys.path somewhere, and the
package is not actually installed, then such code could be fooled into
thinking a package is installed that really isn't.

For example, suppose someone writes a script (datagen.py) containing
the following code:

    try:
        import json
    except ImportError:
        import simplejson as json

And runs it in a directory laid out like this:

    datagen.py
    json/
        foo.js
        bar.js

If import json succeeded due to the mere presence of the json/
subdirectory, the code would incorrectly believe that the json module
was available, and proceed to fail with an error.

However, we can prevent corner cases like these from arising, simply by
making one small change to the algorithm presented so far. Instead of
allowing you to import a "pure virtual" package (like zc), we allow
only importing of the contents of virtual packages.

That is, a statement like import zc should raise ImportError if there
is no zc.py or zc/__init__.py on sys.path. But, doing import
zc.buildout should still succeed, as long as there's a zc/buildout.py
or zc/buildout/__init__.py on sys.path.

In other words, we don't allow pure virtual packages to be imported
directly, only modules and self-contained packages. (This is an
acceptable limitation, because there is no functional value to
importing such a package by itself. After all, the module object will
have no contents until you import at least one of its subpackages or
submodules!)

Once zc.buildout has been successfully imported, though, there will be
a zc module in sys.modules, and trying to import it will of course
succeed. We are only preventing an initial import from succeeding, in
order to prevent false-positive import successes when clashing
subdirectories are present on sys.path.

So, with this slight change, the datagen.py example above will work
correctly. When it does import json, the mere presence of a json/
directory will simply not affect the import process at all, even if it
contains .py files. The json/ directory will still only be searched in
the case where an import like import json.converter is attempted.

Meanwhile, tools that expect to locate packages and modules by walking
a directory tree can be updated to use the existing
pkgutil.walk_modules() API, and tools that need to inspect packages in
memory should use the other APIs described in the Standard Library
Changes/Additions section below.

Specification

A change is made to the existing import process, when importing names
containing at least one . -- that is, imports of modules that have a
parent package.

Specifically, if the parent package does not exist, or exists but lacks
a __path__ attribute, an attempt is first made to create a "virtual
path" for the parent package (following the algorithm described in the
section on virtual paths, below).

If the computed "virtual path" is empty, an ImportError results, just
as it would today. However, if a non-empty virtual path is obtained,
the normal import of the submodule or subpackage proceeds, using that
virtual path to find the submodule or subpackage.
(Just as it would have with the parent's __path__, if the parent
package had existed and had a __path__.)

When a submodule or subpackage is found (but not yet loaded), the
parent package is created and added to sys.modules (if it didn't exist
before), and its __path__ is set to the computed virtual path (if it
wasn't already set).

In this way, when the actual loading of the submodule or subpackage
occurs, it will see a parent package existing, and any relative imports
will work correctly.

However, if no submodule or subpackage exists, then the parent package
will not be created, nor will a standalone module be converted into a
package (by the addition of a spurious __path__ attribute).

Note, by the way, that this change must be applied recursively: that
is, if foo and foo.bar are pure virtual packages, then import
foo.bar.baz must wait until foo.bar.baz is found before creating module
objects for both foo and foo.bar, and then create both of them
together, properly setting the foo module's .bar attribute to point to
the foo.bar module.

In this way, pure virtual packages are never directly importable: an
import foo or import foo.bar by itself will fail, and the corresponding
modules will not appear in sys.modules until they are needed to point
to a successfully imported submodule or self-contained subpackage.

Virtual Paths

A virtual path is created by obtaining a PEP 302 "importer" object for
each of the path entries found in sys.path (for a top-level module) or
the parent __path__ (for a submodule).

(Note: because sys.meta_path importers are not associated with sys.path
or __path__ entry strings, such importers do not participate in this
process.)

Each importer is checked for a get_subpath() method, and if present,
the method is called with the full name of the module/package the path
is being constructed for. The return value is either a string
representing a subdirectory for the requested package, or None if no
such subdirectory exists.

The strings returned by the importers are added to the path list being
built, in the same order as they are found. (None values and missing
get_subpath() methods are simply skipped.)

The resulting list (whether empty or not) is then stored in a
sys.virtual_package_paths dictionary, keyed by module name.

This dictionary has two purposes. First, it serves as a cache, in the
event that more than one attempt is made to import a submodule of a
virtual package. Second, and more importantly, the dictionary can be
used by code that extends sys.path at runtime to update imported
packages' __path__ attributes accordingly. (See Standard Library
Changes/Additions below for more details.)

In Python code, the virtual path construction algorithm would look
something like this:

    def get_virtual_path(modulename, parent_path=None):

        if modulename in sys.virtual_package_paths:
            return sys.virtual_package_paths[modulename]

        if parent_path is None:
            parent_path = sys.path

        path = []

        for entry in parent_path:
            # Obtain a PEP 302 importer object - see pkgutil module
            importer = pkgutil.get_importer(entry)
            if hasattr(importer, 'get_subpath'):
                subpath = importer.get_subpath(modulename)
                if subpath is not None:
                    path.append(subpath)

        sys.virtual_package_paths[modulename] = path
        return path

And a function like this one should be exposed in the standard library
as e.g. imp.get_virtual_path(), so that people creating __import__
replacements or sys.meta_path hooks can reuse it.
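For illustration, here is a sketch - hypothetical, since the PEP
defines only the method's contract, not any particular importer - of
how a filesystem path-entry importer might implement get_subpath():

    # Hypothetical filesystem importer implementing the get_subpath()
    # contract: return the subdirectory for the requested package, or
    # None if it doesn't exist.
    import os

    class FileSystemImporter:
        def __init__(self, path_entry):
            self.path_entry = path_entry

        def get_subpath(self, fullname):
            # for "zope.app", a path entry only ever contributes its
            # last component ("app") relative to that entry
            subdir = os.path.join(self.path_entry,
                                  fullname.rsplit(".", 1)[-1])
            if os.path.isdir(subdir):
                return subdir
            return None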
Standard Library Changes/Additions

The pkgutil module should be updated to handle this specification
appropriately, including any necessary changes to extend_path(),
iter_modules(), etc.

Specifically the proposed changes and additions to pkgutil are:

- A new extend_virtual_paths(path_entry) function, to extend existing,
  already-imported virtual packages' __path__ attributes to include any
  portions found in a new sys.path entry. This function should be
  called by applications extending sys.path at runtime, e.g. when
  adding a plugin directory or an egg to the path.

  The implementation of this function does a simple top-down traversal
  of sys.virtual_package_paths, and performs any necessary
  get_subpath() calls to identify what path entries need to be added to
  the virtual path for that package, given that path_entry has been
  added to sys.path. (Or, in the case of sub-packages, adding a derived
  subpath entry, based on their parent package's virtual path.)

  (Note: this function must update both the path values in
  sys.virtual_package_paths as well as the __path__ attributes of any
  corresponding modules in sys.modules, even though in the common case
  they will both be the same list object.)

- A new iter_virtual_packages(parent='') function to allow top-down
  traversal of virtual packages from sys.virtual_package_paths, by
  yielding the child virtual packages of parent. For example, calling
  iter_virtual_packages("zope") might yield zope.app and zope.products
  (if they are virtual packages listed in sys.virtual_package_paths),
  but not zope.foo.bar. (This function is needed to implement
  extend_virtual_paths(), but is also potentially useful for other code
  that needs to inspect imported virtual packages.)

- ImpImporter.iter_modules() should be changed to also detect and yield
  the names of modules found in virtual packages.

In addition to the above changes, the zipimport importer should have
its iter_modules() implementation similarly changed. (Note: current
versions of Python implement this via a shim in pkgutil, so technically
this is also a change to pkgutil.)

Last, but not least, the imp module (or importlib, if appropriate)
should expose the algorithm described in the virtual paths section
above, as a get_virtual_path(modulename, parent_path=None) function, so
that creators of __import__ replacements can use it.

Implementation Notes

For users, developers, and distributors of virtual packages:

- While virtual packages are easy to set up and use, there is still a
  time and place for using self-contained packages. While it's not
  strictly necessary, adding an __init__ module to your self-contained
  packages lets users of the package (and Python itself) know that all
  of the package's code will be found in that single subdirectory. In
  addition, it lets you define __all__, expose a public API, provide a
  package-level docstring, and do other things that make more sense for
  a self-contained project than for a mere "namespace" package.

- sys.virtual_package_paths is allowed to contain entries for
  non-existent or not-yet-imported package names; code that uses its
  contents should not assume that every key in this dictionary is also
  present in sys.modules or that importing the name will necessarily
  succeed.

- If you are changing a currently self-contained package into a virtual
  one, it's important to note that you can no longer use its __file__
  attribute to locate data files stored in a package directory.
Instead, you must search __path__ or use the __file__ of a submodule adjacent to the desired files, or of a self-contained subpackage that contains the desired files. (Note: this caveat is already true for existing users of "namespace packages" today. That is, it is an inherent result of being able to partition a package, that you must know which partition the desired data file lives in. We mention it here simply so that new users converting from self-contained to virtual packages will also be aware of it.)
- XXX what is the __file__ of a "pure virtual" package? None? Some arbitrary string? The path of the first directory with a trailing separator? No matter what we put, some code is going to break, but the last choice might allow some code to accidentally work. Is that good or bad?

For those implementing PEP 302 importer objects:

- Importers that support the iter_modules() method (used by pkgutil to locate importable modules and packages) and want to add virtual package support should modify their iter_modules() method so that it discovers and lists virtual packages as well as standard modules and packages. To do this, the importer should simply list all immediate subdirectory names in its jurisdiction that are valid Python identifiers. XXX This might list a lot of not-really-packages. Should we require importable contents to exist? If so, how deep do we search, and how do we prevent e.g. link loops, or traversing onto different filesystems, etc.? Ick. Also, if virtual packages are listed, they still can't be imported, which is a problem for the way that pkgutil.walk_modules() is currently implemented.
- "Meta" importers (i.e., importers placed on sys.meta_path) do not need to implement get_subpath(), because the method is only called on importers corresponding to sys.path entries and __path__ entries. If a meta importer wishes to support virtual packages, it must do so entirely within its own find_module() implementation. Unfortunately, it is unlikely that any such implementation will be able to merge its package subpaths with those of other meta importers or sys.path importers, so the meaning of "supporting virtual packages" for a meta importer is currently undefined! (However, since the intended use case for meta importers is to replace Python's normal import process entirely for some subset of modules, and the number of such importers currently implemented is quite small, this seems unlikely to be a big issue in practice.)

References

[1] Namespace Packages resolution (https://mail.python.org/pipermail/import-sig/2012-March/000421.html)
[2] "namespace" vs "module" packages (mailing list thread) (http://mail.zope.org/pipermail/zope3-dev/2002-December/004251.html)
[3] "Dropping __init__.py requirement for subpackages" (https://mail.python.org/pipermail/python-dev/2006-April/064400.html)

Copyright

This document has been placed in the public domain.
PEP: 295 Title: Interpretation of multiline string constants Author: Stepan Koltsov <[email protected]> Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 22-Jul-2002 Python-Version: 3.0 Post-History:

Abstract

This PEP describes an interpretation of multiline string constants for Python. It suggests stripping spaces after newlines and stripping a newline if it is the first character after an opening quotation.

Rationale

This PEP proposes an interpretation of multiline string constants in Python. Currently, the value of a string constant is all the text between the quotations, possibly with escape sequences substituted, e.g.:

    def f():
        """
        la-la-la
        limona, banana
        """

    def g():
        return "This is \
    string"

    print repr(f.__doc__)
    print repr(g())

prints:

    '\n\tla-la-la\n\tlimona, banana\n\t'
    'This is \tstring'

This PEP suggests two things:

- ignore the first character after the opening quotation, if it is a newline
- ignore in string constants all spaces and tabs up to the first non-whitespace character, but no more than the current indentation.

After applying this, the previous program will print:

    'la-la-la\nlimona, banana\n'
    'This is string'

To get this result, the previous programs could be rewritten for current Python as (note, this gives the same result with the new string meaning):

    def f():
        """\
    la-la-la
    limona, banana
    """

    def g():
        return "This is \
    string"

Or stripping can be done with library routines at runtime (as pydoc does), but this decreases program readability.

Implementation

I'll say nothing about CPython, Jython or Python.NET. In the original Python, there is no info about the current indentation (in spaces) at compile time, so space and tab stripping should be done at parse time. Currently no flags can be passed to the parser in program text (like from __future__ import xxx). I suggest enabling or disabling this feature at Python compile time, depending on the CPP flag Py_PARSE_MULTILINE_STRINGS.

Alternatives

The new interpretation of string constants can be implemented with flags 'i' and 'o' on string constants, like:

    i"""
    SELECT * FROM car
    WHERE model = 'i525'
    """

is in the new style,

    o"""SELECT * FROM employee
    WHERE birth < 1982
    """

is in the old style, and

    """
    SELECT employee.name, car.name, car.price
    FROM employee, car
    WHERE employee.salary * 36 > car.price
    """

is in the new style after Python-x.y.z and in the old style otherwise. Also this feature can be disabled if the string is raw, i.e. if the 'r' flag is specified.

Copyright

This document has been placed in the Public Domain.
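Since the proposal was rejected, nothing like this ever happens at compile time; the closest runtime approximation in today's standard library is textwrap.dedent(). A minimal sketch of the proposed semantics (assuming common-prefix dedenting stands in for the PEP's indentation-bounded rule, which a runtime function cannot see):

    import textwrap

    def strip_multiline(s):
        # Drop a newline immediately after the opening quote, then
        # strip the common leading whitespace -- a runtime
        # approximation of the rejected compile-time behaviour.
        if s.startswith('\n'):
            s = s[1:]
        return textwrap.dedent(s)

    doc = """
        la-la-la
        limona, banana
        """
    print(repr(strip_multiline(doc)))  # 'la-la-la\nlimona, banana\n'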
PEP: 414 Title: Explicit Unicode Literal for Python 3.3 Version: $Revision$ Last-Modified: $Date$ Author: Armin Ronacher <[email protected]>, Alyssa Coghlan <[email protected]> Status: Final Type: Standards Track Content-Type: text/x-rst Created: 15-Feb-2012 Python-Version: 3.3 Post-History: 28-Feb-2012, 04-Mar-2012 Resolution: https://mail.python.org/pipermail/python-dev/2012-February/116995.html

Abstract

This document proposes the reintegration of an explicit unicode literal from Python 2.x into the Python 3.x language specification, in order to reduce the volume of changes needed when porting Unicode-aware Python 2 applications to Python 3.

BDFL Pronouncement

This PEP has been formally accepted for Python 3.3:

    I'm accepting the PEP. It's about as harmless as they come. Make it so.

Proposal

This PEP proposes that Python 3.3 restore support for Python 2's Unicode literal syntax, substantially increasing the number of lines of existing Python 2 code in Unicode aware applications that will run without modification on Python 3.

Specifically, the Python 3 definition for string literal prefixes will be expanded to allow:

    "u" | "U"

in addition to the currently supported:

    "r" | "R"

The following will all denote ordinary Python 3 strings:

    'text'     "text"     '''text'''     """text"""
    u'text'    u"text"    u'''text'''    u"""text"""
    U'text'    U"text"    U'''text'''    U"""text"""

No changes are proposed to Python 3's actual Unicode handling, only to the acceptable forms for string literals.

Exclusion of "Raw" Unicode Literals

Python 2 supports a concept of "raw" Unicode literals that don't meet the conventional definition of a raw string: \uXXXX and \UXXXXXXXX escape sequences are still processed by the compiler and converted to the appropriate Unicode code points when creating the associated Unicode objects.

Python 3 has no corresponding concept - the compiler performs no preprocessing of the contents of raw string literals. This matches the behaviour of 8-bit raw string literals in Python 2.

Since such strings are rarely used and would be interpreted differently in Python 3 if permitted, it was decided that leaving them out entirely was a better choice. Code which uses them will thus still fail immediately on Python 3 (with a SyntaxError), rather than potentially producing different output.

To get equivalent behaviour that will run on both Python 2 and Python 3, either an ordinary Unicode literal can be used (with appropriate additional escaping within the string), or else string concatenation or string formatting can be used to combine the raw portions of the string with those that require the use of Unicode escape sequences.

Note that when using from __future__ import unicode_literals in Python 2, the nominally "raw" Unicode string literals will process \uXXXX and \UXXXXXXXX escape sequences, just like Python 2 strings explicitly marked with the "raw Unicode" prefix.

Author's Note

This PEP was originally written by Armin Ronacher, and Guido's approval was given based on that version. The currently published version has been rewritten by Alyssa Coghlan to include additional historical details and rationale that were taken into account when Guido made his decision, but were not explicitly documented in Armin's version of the PEP.

Readers should be aware that many of the arguments in this PEP are not technical ones. Instead, they relate heavily to the social and personal aspects of software development.
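The accepted change itself is easy to demonstrate: on Python 3.3 or later the restored prefix is purely cosmetic, as the following (runnable) checks show.

    # Valid on Python 3.3+; a SyntaxError on Python 3.0-3.2.
    assert u"caf\u00e9" == "caf\u00e9"
    assert type(u"text") is str

    # All of the accepted spellings denote the same ordinary strings:
    assert u'x' == U'x' == 'x' == "x"

    # The "raw" Unicode prefix remains excluded:
    # ur"\d+"   # still a SyntaxError on Python 3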
Rationale With the release of a Python 3 compatible version of the Web Services Gateway Interface (WSGI) specification (PEP 3333) for Python 3.2, many parts of the Python web ecosystem have been making a concerted effort to support Python 3 without adversely affecting their existing developer and user communities. One major item of feedback from key developers in those communities, including Chris McDonough (WebOb, Pyramid), Armin Ronacher (Flask, Werkzeug), Jacob Kaplan-Moss (Django) and Kenneth Reitz (requests) is that the requirement to change the spelling of every Unicode literal in an application (regardless of how that is accomplished) is a key stumbling block for porting efforts. In particular, unlike many of the other Python 3 changes, it isn't one that framework and library authors can easily handle on behalf of their users. Most of those users couldn't care less about the "purity" of the Python language specification, they just want their websites and applications to work as well as possible. While it is the Python web community that has been most vocal in highlighting this concern, it is expected that other highly Unicode aware domains (such as GUI development) may run into similar issues as they (and their communities) start making concerted efforts to support Python 3. Common Objections Complaint: This PEP may harm adoption of Python 3.2 This complaint is interesting, as it carries within it a tacit admission that this PEP will make it easier to port Unicode aware Python 2 applications to Python 3. There are many existing Python communities that are prepared to put up with the constraints imposed by the existing suite of porting tools, or to update their Python 2 code bases sufficiently that the problems are minimised. This PEP is not for those communities. Instead, it is designed specifically to help people that don't want to put up with those difficulties. However, since the proposal is for a comparatively small tweak to the language syntax with no semantic changes, it is feasible to support it as a third party import hook. While such an import hook imposes some import time overhead, and requires additional steps from each application that needs it to get the hook in place, it allows applications that target Python 3.2 to use libraries and frameworks that would otherwise only run on Python 3.3+ due to their use of unicode literal prefixes. One such import hook project is Vinay Sajip's uprefix[1]. For those that prefer to translate their code in advance rather than converting on the fly at import time, Armin Ronacher is working on a hook that runs at install time rather than during import[2]. Combining the two approaches is of course also possible. For example, the import hook could be used for rapid edit-test cycles during local development, but the install hook for continuous integration tasks and deployment on Python 3.2. The approaches described in this section may prove useful, for example, for applications that wish to target Python 3 on the Ubuntu 12.04 LTS release, which will ship with Python 2.7 and 3.2 as officially supported Python versions. Complaint: Python 3 shouldn't be made worse just to support porting from Python 2 This is indeed one of the key design principles of Python 3. However, one of the key design principles of Python as a whole is that "practicality beats purity". If we're going to impose a significant burden on third party developers, we should have a solid rationale for doing so. 
In most cases, the rationale for backwards incompatible Python 3 changes is either to improve code correctness (for example, stricter default separation of binary and text data and integer division upgrading to floats when necessary), to reduce typical memory usage (for example, increased usage of iterators and views over concrete lists), or to remove distracting nuisances that make Python code harder to read without increasing its expressiveness (for example, the comma based syntax for naming caught exceptions). Changes backed by such reasoning are not going to be reverted, regardless of objections from Python 2 developers attempting to make the transition to Python 3.

In many cases, Python 2 offered two ways of doing things for historical reasons. For example, inequality could be tested with both != and <>, and integer literals could be specified with an optional L suffix. Such redundancies have been eliminated in Python 3, which reduces the overall size of the language and improves consistency across developers.

In the original Python 3 design (up to and including Python 3.2), the explicit prefix syntax for unicode literals was deemed to fall into this category, as it is completely unnecessary in Python 3. However, the difference between those other cases and unicode literals is that the unicode literal prefix is not redundant in Python 2 code: it is a programmatically significant distinction that needs to be preserved in some fashion to avoid losing information.

While porting tools were created to help with the transition (see next section), it still creates an additional burden on heavy users of unicode strings in Python 2, solely so that future developers learning Python 3 don't need to be told "For historical reasons, string literals may have an optional u or U prefix. Never use this yourselves, it's just there to help with porting from an earlier version of the language." Plenty of students learning Python 2 received similar warnings regarding string exceptions without being confused or irreparably stunted in their growth as Python developers. It will be the same with this feature.

This point is further reinforced by the fact that Python 3 still allows the uppercase variants of the B and R prefixes for bytes literals and raw bytes and string literals. If the potential for confusion due to string prefix variants is that significant, where was the outcry asking that these redundant prefixes be removed along with all the other redundancies that were eliminated in Python 3?

Just as support for string exceptions was eliminated from Python 2 using the normal deprecation process, support for redundant string prefix characters (specifically, B, R, u, U) may eventually be eliminated from Python 3, regardless of the current acceptance of this PEP. However, such a change will likely only occur once third party libraries supporting Python 2.7 are about as common as libraries supporting Python 2.2 or 2.3 are today.

Complaint: The WSGI "native strings" concept is an ugly hack

One reason the removal of unicode literals has provoked such concern amongst the web development community is that the updated WSGI specification had to make a few compromises to minimise the disruption for existing web servers that provide a WSGI-compatible interface (this was deemed necessary in order to make the updated standard a viable target for web application authors and web framework developers). One of those compromises is the concept of a "native string".
WSGI defines three different kinds of string:

- text strings: handled as unicode in Python 2 and str in Python 3
- native strings: handled as str in both Python 2 and Python 3
- binary data: handled as str in Python 2 and bytes in Python 3

Some developers consider WSGI's "native strings" to be an ugly hack, as they are explicitly documented as being used solely for latin-1 decoded "text", regardless of the actual encoding of the underlying data. Using this approach bypasses many of the updates to Python 3's data model that are designed to encourage correct handling of text encodings. However, it generally works due to the specific details of the problem domain - web server and web framework developers are some of the individuals most aware of how blurry the line can get between binary data and text when working with HTTP and related protocols, and how important it is to understand the implications of the encodings in use when manipulating encoded text data. At the application level most of these details are hidden from the developer by the web frameworks and support libraries (both in Python 2 and in Python 3).

In practice, native strings are a useful concept because there are some APIs (both in the standard library and in third party frameworks and packages) and some internal interpreter details that are designed primarily to work with str. These components often don't support unicode in Python 2 or bytes in Python 3, or, if they do, require additional encoding details and/or impose constraints that don't apply to the str variants.

Some examples of interfaces that are best handled by using actual str instances are:

- Python identifiers (as attributes, dict keys, class names, module names, import references, etc)
- URLs for the most part as well as HTTP headers in urllib/http servers
- WSGI environment keys and CGI-inherited values
- Python source code for dynamic compilation and AST hacks
- Exception messages
- __repr__ return value
- preferred filesystem paths
- preferred OS environment

In Python 2.6 and 2.7, these distinctions are most naturally expressed as follows:

- u"": text string (unicode)
- "": native string (str)
- b"": binary data (str, also aliased as bytes)

In Python 3, the latin-1 decoded native strings are not distinguished from any other text strings:

- "": text string (str)
- "": native string (str)
- b"": binary data (bytes)

If from __future__ import unicode_literals is used to modify the behaviour of Python 2, then, along with an appropriate definition of n(), the distinction can be expressed as:

- "": text string
- n(""): native string
- b"": binary data

(While n=str works for simple cases, it can sometimes have problems due to non-ASCII source encodings.)

In the common subset of Python 2 and Python 3 (with appropriate specification of a source encoding and definitions of the u() and b() helper functions), they can be expressed as:

- u(""): text string
- "": native string
- b(""): binary data

That last approach is the only variant that supports Python 2.5 and earlier.

Of all the alternatives, the format currently supported in Python 2.6 and 2.7 is by far the cleanest approach that clearly distinguishes the three desired kinds of behaviour. With this PEP, that format will also be supported in Python 3.3+. It will also be supported in Python 3.1 and 3.2 through the use of import and install hooks. While it is significantly less likely, it is also conceivable that the hooks could be adapted to allow the use of the b prefix on Python 2.5.
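For completeness, here is a minimal sketch of the u() and b() helper functions mentioned above, in the spirit of (but not identical to) the ones provided by the six compatibility library; real libraries are more careful about doubled backslashes and source encodings:

    import sys

    if sys.version_info[0] >= 3:
        def u(s):
            return s                    # literals are already text
        def b(s):
            return s.encode("latin-1")  # turn the literal into bytes
    else:
        def u(s):
            # Process \uXXXX escapes the way a u"" literal would.
            return s.decode("unicode_escape")
        def b(s):
            return s                    # str is already binary data

    text = u("caf\u00e9")   # text string on both major versions
    native = "native"       # native string: str on 2.x and 3.x
    data = b("\x00\x01")    # binary data: str on 2.x, bytes on 3.x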
Complaint: The existing tools should be good enough for everyone A commonly expressed sentiment from developers that have already successfully ported applications to Python 3 is along the lines of "if you think it's hard, you're doing it wrong" or "it's not that hard, just try it!". While it is no doubt unintentional, these responses all have the effect of telling the people that are pointing out inadequacies in the current porting toolset "there's nothing wrong with the porting tools, you just suck and don't know how to use them properly". These responses are a case of completely missing the point of what people are complaining about. The feedback that resulted in this PEP isn't due to people complaining that ports aren't possible. Instead, the feedback is coming from people that have successfully completed ports and are objecting that they found the experience thoroughly unpleasant for the class of application that they needed to port (specifically, Unicode aware web frameworks and support libraries). This is a subjective appraisal, and it's the reason why the Python 3 porting tools ecosystem is a case where the "one obvious way to do it" philosophy emphatically does not apply. While it was originally intended that "develop in Python 2, convert with 2to3, test both" would be the standard way to develop for both versions in parallel, in practice, the needs of different projects and developer communities have proven to be sufficiently diverse that a variety of approaches have been devised, allowing each group to select an approach that best fits their needs. Lennart Regebro has produced an excellent overview of the available migration strategies[3], and a similar review is provided in the official porting guide[4]. (Note that the official guidance has softened to "it depends on your specific situation" since Lennart wrote his overview). However, both of those guides are written from the founding assumption that all of the developers involved are already committed to the idea of supporting Python 3. They make no allowance for the social aspects of such a change when you're interacting with a user base that may not be especially tolerant of disruptions without a clear benefit, or are trying to persuade Python 2 focused upstream developers to accept patches that are solely about improving Python 3 forward compatibility. With the current porting toolset, every migration strategy will result in changes to every Unicode literal in a project. No exceptions. They will be converted to either an unprefixed string literal (if the project decides to adopt the unicode_literals import) or else to a converter call like u("text"). If the unicode_literals import approach is employed, but is not adopted across the entire project at the same time, then the meaning of a bare string literal may become annoyingly ambiguous. This problem can be particularly pernicious for aggregated software, like a Django site - in such a situation, some files may end up using the unicode_literals import and others may not, creating definite potential for confusion. While these problems are clearly solvable at a technical level, they're a completely unnecessary distraction at the social level. Developer energy should be reserved for addressing real technical difficulties associated with the Python 3 transition (like distinguishing their 8-bit text strings from their binary data). 
They shouldn't be punished with additional code changes (even automated ones) solely due to the fact that they have already explicitly identified their Unicode strings in Python 2. Armin Ronacher has created an experimental extension to 2to3 which only modernizes Python code to the extent that it runs on Python 2.7 or later with support from the cross-version compatibility six library. This tool is available as python-modernize[5]. Currently, the deltas generated by this tool will affect every Unicode literal in the converted source. This will create legitimate concerns amongst upstream developers asked to accept such changes, and amongst framework users being asked to change their applications. However, by eliminating the noise from changes to the Unicode literal syntax, many projects could be cleanly and (comparatively) non-controversially made forward compatible with Python 3.3+ just by running python-modernize and applying the recommended changes. References Copyright This document has been placed in the public domain. Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 End: [1] uprefix import hook project (https://bitbucket.org/vinay.sajip/uprefix) [2] install hook to remove unicode string prefix characters (https://github.com/mitsuhiko/unicode-literals-pep/tree/master/install-hook) [3] Porting to Python 3: Migration Strategies (http://python3porting.com/strategies.html) [4] Porting Python 2 Code to Python 3 (http://docs.python.org/howto/pyporting.html) [5] Python-Modernize (http://github.com/mitsuhiko/python-modernize)
PEP: 3152 Title: Cofunctions Version: $Revision$ Last-Modified: $Date$ Author: Gregory Ewing <[email protected]> Status: Rejected Type: Standards Track Content-Type: text/x-rst Created: 13-Feb-2009 Python-Version: 3.3 Post-History:

Abstract

A syntax is proposed for defining and calling a special type of generator called a 'cofunction'. It is designed to provide a streamlined way of writing generator-based coroutines, and allow the early detection of certain kinds of error that are easily made when writing such code, which otherwise tend to cause hard-to-diagnose symptoms. This proposal builds on the 'yield from' mechanism described in PEP 380, and describes some of the semantics of cofunctions in terms of it. However, it would be possible to define and implement cofunctions independently of PEP 380 if so desired.

Rejection

See https://mail.python.org/pipermail/python-dev/2015-April/139503.html

Specification

Cofunction definitions

A new keyword codef is introduced which is used in place of def to define a cofunction. A cofunction is a special kind of generator having the following characteristics:

1. A cofunction is always a generator, even if it does not contain any yield or yield from expressions.
2. A cofunction cannot be called the same way as an ordinary function. An exception is raised if an ordinary call to a cofunction is attempted.

Cocalls

Calls from one cofunction to another are made by marking the call with a new keyword cocall. The expression:

    cocall f(*args, **kwds)

is semantically equivalent to:

    yield from f.__cocall__(*args, **kwds)

except that the object returned by __cocall__ is expected to be an iterator, so the step of calling iter() on it is skipped.

The full syntax of a cocall expression is described by the following grammar lines:

    atom: cocall | <existing alternatives for atom>
    cocall: 'cocall' atom cotrailer* '(' [arglist] ')'
    cotrailer: '[' subscriptlist ']' | '.' NAME

The cocall keyword is syntactically valid only inside a cofunction. A SyntaxError will result if it is used in any other context.

Objects which implement __cocall__ are expected to return an object obeying the iterator protocol. Cofunctions respond to __cocall__ the same way as ordinary generator functions respond to __call__, i.e. by returning a generator-iterator.

Certain objects that wrap other callable objects, notably bound methods, will be given __cocall__ implementations that delegate to the underlying object.

New builtins, attributes and C API functions

To facilitate interfacing cofunctions with non-coroutine code, there will be a built-in function costart whose definition is equivalent to:

    def costart(obj, *args, **kwds):
        return obj.__cocall__(*args, **kwds)

There will also be a corresponding C API function:

    PyObject *PyObject_CoCall(PyObject *obj, PyObject *args, PyObject *kwds)

It is left unspecified for now whether a cofunction is a distinct type of object or, like a generator function, is simply a specially-marked function instance. If the latter, a read-only boolean attribute __iscofunction__ should be provided to allow testing whether a given function object is a cofunction.

Motivation and Rationale

The yield from syntax is reasonably self-explanatory when used for the purpose of delegating part of the work of a generator to another function. It can also be used to good effect in the implementation of generator-based coroutines, but it reads somewhat awkwardly when used for that purpose, and tends to obscure the true intent of the code.
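Because the PEP defines cocall in terms of yield from, the proposed machinery can be emulated in ordinary Python 3 without any new syntax. The decorator and example below are assumptions of this sketch (codef, cocall, and the built-in costart were never implemented); only the __cocall__ equivalence comes from the specification above:

    def cofunction(f):
        # Emulation of codef: wrap a generator function in an object
        # that exposes it only through __cocall__ (characteristic 2).
        class Cofunction:
            __cocall__ = staticmethod(f)
            def __call__(self, *args, **kwds):
                raise TypeError("cofunctions cannot be called directly")
        return Cofunction()

    def costart(obj, *args, **kwds):
        # Same definition as the built-in proposed by the PEP.
        return obj.__cocall__(*args, **kwds)

    @cofunction
    def add_up(items):
        total = 0
        for x in items:
            yield            # cooperative suspension point
            total += x
        return total

    # Driving the coroutine from non-coroutine code:
    it = costart(add_up, [1, 2, 3])
    try:
        while True:
            next(it)
    except StopIteration as exc:
        print(exc.value)     # 6

Inside another cofunction, the expression cocall add_up(items) would correspond to total = yield from add_up.__cocall__(items) under the equivalence stated above.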
Furthermore, using generators as coroutines is somewhat error-prone. If one forgets to use yield from when it should have been used, or uses it when it shouldn't have, the symptoms that result can be obscure and confusing. Finally, sometimes there is a need for a function to be a coroutine even though it does not yield anything, and in these cases it is necessary to resort to kludges such as if 0: yield to force it to be a generator. The codef and cocall constructs address the first issue by making the syntax directly reflect the intent, that is, that the function forms part of a coroutine. The second issue is addressed by making it impossible to mix coroutine and non-coroutine code in ways that don't make sense. If the rules are violated, an exception is raised that points out exactly what and where the problem is. Lastly, the need for dummy yields is eliminated by making the form of definition determine whether the function is a coroutine, rather than what it contains. Prototype Implementation An implementation in the form of patches to Python 3.1.2 can be found here: http://www.cosc.canterbury.ac.nz/greg.ewing/python/generators/cofunctions.html Copyright This document has been placed in the public domain. Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 364 Title: Transitioning to the Py3K Standard Library Version: $Revision$ Last-Modified: $Date$ Author: Barry Warsaw <[email protected]> Status: Withdrawn Type: Standards Track Content-Type: text/x-rst Created: 01-Mar-2007 Python-Version: 2.6 Post-History: Abstract PEP 3108 describes the reorganization of the Python standard library for the Python 3.0 release. This PEP describes a mechanism for transitioning from the Python 2.x standard library to the Python 3.0 standard library. This transition will allow and encourage Python programmers to use the new Python 3.0 library names starting with Python 2.6, while maintaining the old names for backward compatibility. In this way, a Python programmer will be able to write forward compatible code without sacrificing interoperability with existing Python programs. Rationale PEP 3108 presents a rationale for Python standard library (stdlib) reorganization. The reader is encouraged to consult that PEP for details about why and how the library will be reorganized. Should PEP 3108 be accepted in part or in whole, then it is advantageous to allow Python programmers to begin the transition to the new stdlib module names in Python 2.x, so that they can write forward compatible code starting with Python 2.6. Note that PEP 3108 proposes to remove some "silly old stuff", i.e. modules that are no longer useful or necessary. The PEP you are reading does not address this because there are no forward compatibility issues for modules that are to be removed, except to stop using such modules. This PEP concerns only the mechanism by which mappings from old stdlib names to new stdlib names are maintained. Please consult PEP 3108 for all specific module renaming proposals. Specifically see the section titled Modules to Rename for guidelines on the old name to new name mappings. The few examples in this PEP are given for illustrative purposes only and should not be used for specific renaming recommendations. Supported Renamings There are at least 4 use cases explicitly supported by this PEP: - Simple top-level package name renamings, such as StringIO to stringio; - Sub-package renamings where the package name may or may not be renamed, such as email.MIMEText to email.mime.text; - Extension module renaming, such as cStringIO to cstringio; - Third party renaming of any of the above. Two use cases supported by this PEP include renaming simple top-level modules, such as StringIO, as well as modules within packages, such as email.MIMEText. In the former case, PEP 3108 currently recommends StringIO be renamed to stringio, following PEP 8 recommendations. In the latter case, the email 4.0 package distributed with Python 2.5 already renamed email.MIMEText to email.mime.text, although it did so in a one-off, uniquely hackish way inside the email package. The mechanism described in this PEP is general enough to handle all module renamings, obviating the need for the Python 2.5 hack (except for backward compatibility with earlier Python versions). An additional use case is to support the renaming of C extension modules. As long as the new name for the C module is importable, it can be remapped to the new name. E.g. cStringIO renamed to cstringio. Third party package renaming is also supported, via several public interfaces accessible by any Python module. Remappings are not performed recursively. .mv files Remapping files are called .mv files; the suffix was chosen to be evocative of the Unix mv(1) command. An .mv file is a simple line-oriented text file. 
All blank lines and lines that start with a # are ignored. All other lines must contain two whitespace separated fields. The first field is the old module name, and the second field is the new module name. Both module names must be specified using their full dotted-path names. Here is an example .mv file from Python 2.6:

    # Map the various string i/o libraries to their new names
    StringIO stringio
    cStringIO cstringio

.mv files can appear anywhere in the file system, and there is a programmatic interface provided to parse them, and register the remappings inside them. By default, when Python starts up, all the .mv files in the oldlib package are read, and their remappings are automatically registered. This is where all the module remappings should be specified for top-level Python 2.x standard library modules.

Implementation Specification

This section provides the full specification for how module renamings in Python 2.x are implemented. The central mechanism relies on various import hooks as described in PEP 302. Specifically sys.path_importer_cache, sys.path, and sys.meta_path are all employed to provide the necessary functionality.

When Python's import machinery is initialized, the oldlib package is imported. Inside oldlib there is a class called OldStdlibLoader. This class implements the PEP 302 interface and is automatically instantiated, with zero arguments. The constructor reads all the .mv files from the oldlib package directory, automatically registering all the remappings found in those .mv files. This is how the Python 2.x standard library is remapped.

The OldStdlibLoader class should not be instantiated by other Python modules. Instead, you can access the global OldStdlibLoader instance via the sys.stdlib_remapper instance. Use this instance if you want programmatic access to the remapping machinery.

One important implementation detail: as needed by the PEP 302 API, a magic string is added to sys.path, and module __path__ attributes, in order to hook in our remapping loader. This magic string is currently <oldlib> and some changes were necessary to Python's site.py file in order to treat all sys.path entries starting with < as special. Specifically, no attempt is made to make them absolute file names (since they aren't file names at all).

In order for the remapping import hooks to work, the module or package must be physically located under its new name. This is because the import hooks catch only modules that are not already imported, and cannot be imported by Python's built-in import rules. Thus, if a module has been moved, say from Lib/StringIO.py to Lib/stringio.py, and the former's .pyc file has been removed, then without the remapper, this would fail:

    import StringIO

Instead, with the remapper, this failing import will be caught, the old name will be looked up in the registered remappings, and in this case, the new name stringio will be found. The remapper then attempts to import the new name, and if that succeeds, it binds the resulting module into sys.modules, under both the old and new names. Thus, the above import will result in entries in sys.modules for 'StringIO' and 'stringio', and both will point to the exact same module object.

Note that no way to disable the remapping machinery is proposed, short of moving all the .mv files away or programmatically removing them in some custom start up code. In Python 3.0, the remappings will be eliminated, leaving only the "new" names.
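A parser for the format just described is small. Here is a sketch; the helper name is hypothetical, since the PEP's oldlib machinery was never merged:

    def parse_mv_file(filename):
        # Blank lines and #-comments are ignored; every other line is
        # "oldname newname" with full dotted-path module names.
        mappings = {}
        with open(filename) as fp:
            for line in fp:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                oldname, newname = line.split()
                mappings[oldname] = newname
        return mappings

    # For the example file above this returns:
    # {'StringIO': 'stringio', 'cStringIO': 'cstringio'}

And although the PEP predates importlib, the behaviour just described - catch an import of the old name, import the new name, and bind the same module object under both names - maps naturally onto a modern meta-path finder. The following is an illustrative sketch only, not the PEP's reference implementation, and a real version would take more care with module metadata such as __spec__:

    import importlib
    import importlib.abc
    import importlib.util
    import sys

    class _AliasLoader(importlib.abc.Loader):
        def __init__(self, newname):
            self.newname = newname
        def create_module(self, spec):
            # Reuse the module object already imported (or importable)
            # under its new name.
            return importlib.import_module(self.newname)
        def exec_module(self, module):
            pass  # already executed under the new name

    class RemapperFinder(importlib.abc.MetaPathFinder):
        def __init__(self, mapping):
            self.mapping = mapping  # old dotted name -> new dotted name
        def find_spec(self, fullname, path=None, target=None):
            newname = self.mapping.get(fullname)
            if newname is None:
                return None
            return importlib.util.spec_from_loader(
                fullname, _AliasLoader(newname))

    # sys.meta_path.append(RemapperFinder({"StringIO": "io"}))
    # import StringIO   # would bind the same module object as io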
Programmatic Interface

Several methods are added to the sys.stdlib_remapper object, which third party packages can use to register their own remappings. Note however that in all cases, there is one and only one mapping from an old name to a new name. If two .mv files contain different mappings for an old name, or if a programmatic call is made with an old name that is already remapped, the previous mapping is lost. This will not affect any already imported modules. The following methods are available on the sys.stdlib_remapper object:

- read_mv_file(filename) -- Read the given file and register all remappings found in the file.
- read_directory_mv_files(dirname, suffix='.mv') -- List the given directory, reading all files in that directory that have the matching suffix (.mv by default). For each parsed file, register all the remappings found in that file.
- set_mapping(oldname, newname) -- Register a new mapping from an old module name to a new module name. Both must be the full dotted-path name to the module. newname may be None in which case any existing mapping for oldname will be removed (it is not an error if there is no existing mapping).
- get_mapping(oldname, default=None) -- Return any registered newname for the given oldname. If there is no registered remapping, default is returned.

Open Issues

- Should there be a command line switch and/or environment variable to disable all remappings?
- Should remappings occur recursively?
- Should we automatically parse package directories for .mv files when the package's __init__.py is loaded? This would allow packages to easily include .mv files for their own remappings. Compare what the email package currently has to do if we place its .mv file in the email package instead of in the oldlib package:

      # Expose old names
      import os, sys
      sys.stdlib_remapper.read_directory_mv_files(os.path.dirname(__file__))

  I think we should automatically read a package's directory for any .mv files it might contain.

Reference Implementation

A reference implementation, in the form of a patch against the current (as of this writing) state of the Python 2.6 svn trunk, is available as SourceForge patch #1675334[1]. Note that this patch includes a rename of cStringIO to cstringio, but this is primarily for illustrative and unit testing purposes. Should the patch be accepted, we might want to split this change off into other PEP 3108 changes.

References

[1] Reference implementation (http://bugs.python.org/issue1675334)

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 318 Title: Decorators for Functions and Methods Version: $Revision$ Last-Modified: $Date$ Author: Kevin D. Smith <[email protected]>, Jim J. Jewett, Skip Montanaro, Anthony Baxter Status: Final Type: Standards Track Content-Type: text/x-rst Created: 05-Jun-2003 Python-Version: 2.4 Post-History: 09-Jun-2003, 10-Jun-2003, 27-Feb-2004, 23-Mar-2004, 30-Aug-2004, 02-Sep-2004

WarningWarningWarning

This document is meant to describe the decorator syntax and the process that resulted in the decisions that were made. It does not attempt to cover the huge number of potential alternative syntaxes, nor is it an attempt to exhaustively list all the positives and negatives of each form.

Abstract

The current method for transforming functions and methods (for instance, declaring them as a class or static method) is awkward and can lead to code that is difficult to understand. Ideally, these transformations should be made at the same point in the code where the declaration itself is made. This PEP introduces new syntax for transformations of a function or method declaration.

Motivation

The current method of applying a transformation to a function or method places the actual transformation after the function body. For large functions this separates a key component of the function's behavior from the definition of the rest of the function's external interface. For example:

    def foo(self):
        perform method operation
    foo = classmethod(foo)

This becomes less readable with longer methods. It also seems less than pythonic to name the function three times for what is conceptually a single declaration. A solution to this problem is to move the transformation of the method closer to the method's own declaration. The intent of the new syntax is to replace:

    def foo(cls):
        pass
    foo = synchronized(lock)(foo)
    foo = classmethod(foo)

with an alternative that places the decoration in the function's declaration:

    @classmethod
    @synchronized(lock)
    def foo(cls):
        pass

Modifying classes in this fashion is also possible, though the benefits are not as immediately apparent. Almost certainly, anything which could be done with class decorators could be done using metaclasses, but using metaclasses is sufficiently obscure that there is some attraction to having an easier way to make simple modifications to classes. For Python 2.4, only function/method decorators are being added. PEP 3129 proposes to add class decorators as of Python 2.6.

Why Is This So Hard?

Two decorators (classmethod() and staticmethod()) have been available in Python since version 2.2. It's been assumed since approximately that time that some syntactic support for them would eventually be added to the language. Given this assumption, one might wonder why it's been so difficult to arrive at a consensus. Discussions have raged off-and-on at times in both comp.lang.python and the python-dev mailing list about how best to implement function decorators. There is no one clear reason why this should be so, but a few problems seem to be most divisive.

- Disagreement about where the "declaration of intent" belongs. Almost everyone agrees that decorating/transforming a function at the end of its definition is suboptimal. Beyond that there seems to be no clear consensus where to place this information.
- Syntactic constraints. Python is a syntactically simple language with fairly strong constraints on what can and can't be done without "messing things up" (both visually and with regards to the language parser).
There's no obvious way to structure this information so that people new to the concept will think, "Oh yeah, I know what you're doing." The best that seems possible is to keep new users from creating a wildly incorrect mental model of what the syntax means. - Overall unfamiliarity with the concept. For people who have a passing acquaintance with algebra (or even basic arithmetic) or have used at least one other programming language, much of Python is intuitive. Very few people will have had any experience with the decorator concept before encountering it in Python. There's just no strong preexisting meme that captures the concept. - Syntax discussions in general appear to cause more contention than almost anything else. Readers are pointed to the ternary operator discussions that were associated with PEP 308 for another example of this. Background There is general agreement that syntactic support is desirable to the current state of affairs. Guido mentioned syntactic support for decorators in his DevDay keynote presentation at the 10th Python Conference, though he later said it was only one of several extensions he proposed there "semi-jokingly". Michael Hudson raised the topic on python-dev shortly after the conference, attributing the initial bracketed syntax to an earlier proposal on comp.lang.python by Gareth McCaughan. Class decorations seem like an obvious next step because class definition and function definition are syntactically similar, however Guido remains unconvinced, and class decorators will almost certainly not be in Python 2.4. The discussion continued on and off on python-dev from February 2002 through July 2004. Hundreds and hundreds of posts were made, with people proposing many possible syntax variations. Guido took a list of proposals to EuroPython 2004, where a discussion took place. Subsequent to this, he decided that we'd have the Java-style @decorator syntax, and this appeared for the first time in 2.4a2. Barry Warsaw named this the 'pie-decorator' syntax, in honor of the Pie-thon Parrot shootout which occurred around the same time as the decorator syntax, and because the @ looks a little like a pie. Guido outlined his case on Python-dev, including this piece on some of the (many) rejected forms. On the name 'Decorator' There's been a number of complaints about the choice of the name 'decorator' for this feature. The major one is that the name is not consistent with its use in the GoF book. The name 'decorator' probably owes more to its use in the compiler area -- a syntax tree is walked and annotated. It's quite possible that a better name may turn up. Design Goals The new syntax should - work for arbitrary wrappers, including user-defined callables and the existing builtins classmethod() and staticmethod(). This requirement also means that a decorator syntax must support passing arguments to the wrapper constructor - work with multiple wrappers per definition - make it obvious what is happening; at the very least it should be obvious that new users can safely ignore it when writing their own code - be a syntax "that ... [is] easy to remember once explained" - not make future extensions more difficult - be easy to type; programs that use it are expected to use it very frequently - not make it more difficult to scan through code quickly. 
It should still be easy to search for all definitions, a particular definition, or the arguments that a function accepts
- not needlessly complicate secondary support tools such as language-sensitive editors and other "toy parser tools out there"
- allow future compilers to optimize for decorators. With the hope of a JIT compiler for Python coming into existence at some point this tends to require the syntax for decorators to come before the function definition
- move from the end of the function, where it's currently hidden, to the front where it is more in your face

Andrew Kuchling has links to a bunch of the discussions about motivations and use cases in his blog. Particularly notable is Jim Huginin's list of use cases.

Current Syntax

The current syntax for function decorators as implemented in Python 2.4a2 is:

    @dec2
    @dec1
    def func(arg1, arg2, ...):
        pass

This is equivalent to:

    def func(arg1, arg2, ...):
        pass
    func = dec2(dec1(func))

without the intermediate assignment to the variable func. The decorators are near the function declaration. The @ sign makes it clear that something new is going on here.

The rationale for the order of application (bottom to top) is that it matches the usual order for function-application. In mathematics, composition of functions (g o f)(x) translates to g(f(x)). In Python, @g @f def foo() translates to foo = g(f(foo)).

The decorator statement is limited in what it can accept -- arbitrary expressions will not work. Guido preferred this because of a gut feeling.

The current syntax also allows decorator declarations to call a function that returns a decorator:

    @decomaker(argA, argB, ...)
    def func(arg1, arg2, ...):
        pass

This is equivalent to:

    func = decomaker(argA, argB, ...)(func)

The rationale for having a function that returns a decorator is that the part after the @ sign can be considered to be an expression (though syntactically restricted to just a function), and whatever that expression returns is called. See declaration arguments.

Syntax Alternatives

There have been a large number of different syntaxes proposed -- rather than attempting to work through these individual syntaxes, it's worthwhile to break the syntax discussion down into a number of areas. Attempting to discuss each possible syntax individually would be an act of madness, and produce a completely unwieldy PEP.

Decorator Location

The first syntax point is the location of the decorators. For the following examples, we use the @ syntax used in 2.4a2. Decorators before the def statement are the first alternative, and the syntax used in 2.4a2:

    @classmethod
    def foo(arg1,arg2):
        pass

    @accepts(int,int)
    @returns(float)
    def bar(low,high):
        pass

There have been a number of objections raised to this location -- the primary one is that it's the first real Python case where a line of code has an effect on a following line. The syntax available in 2.4a3 requires one decorator per line (in a2, multiple decorators could be specified on the same line), and the final decision for 2.4 final stayed one decorator per line. People also complained that the syntax quickly got unwieldy when multiple decorators were used. The point was made, though, that the chances of a large number of decorators being used on a single function were small and thus this was not a large worry.

Some of the advantages of this form are that the decorators live outside the method body -- they are obviously executed at the time the function is defined.
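Returning for a moment to the decorator-factory form shown under Current Syntax, it is easy to see end-to-end in a small runnable example. The names below (decomaker, tag, add) are hypothetical, chosen for illustration; only the @name(args) call shape comes from the PEP:

    def decomaker(tag):
        # Calling decomaker(...) returns the actual decorator,
        # matching the @decomaker(argA, argB, ...) form above.
        def decorator(func):
            def wrapper(*args, **kwds):
                print("[%s] calling %s" % (tag, func.__name__))
                return func(*args, **kwds)
            return wrapper
        return decorator

    @decomaker("audit")
    def add(a, b):
        return a + b

    print(add(2, 3))   # prints "[audit] calling add", then 5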
Another advantage of the decorators-before-def form is that a prefix to the function definition fits the idea of knowing about a change to the semantics of the code before the code itself, thus you know how to interpret the code's semantics properly without having to go back and change your initial perceptions if the syntax did not come before the function definition. Guido decided he preferred having the decorators on the line before the 'def', because it was felt that a long argument list would mean that the decorators would be 'hidden'.

The second form is the decorators between the def and the function name, or between the function name and the argument list:

    def @classmethod foo(arg1,arg2):
        pass

    def @accepts(int,int),@returns(float) bar(low,high):
        pass

    def foo @classmethod (arg1,arg2):
        pass

    def bar @accepts(int,int),@returns(float) (low,high):
        pass

There are a couple of objections to this form. The first is that it breaks the easy 'greppability' of the source -- you can no longer search for 'def foo(' and find the definition of the function. The second, more serious, objection is that in the case of multiple decorators, the syntax would be extremely unwieldy.

The next form, which has had a number of strong proponents, is to have the decorators between the argument list and the trailing : in the 'def' line:

    def foo(arg1,arg2) @classmethod:
        pass

    def bar(low,high) @accepts(int,int),@returns(float):
        pass

Guido summarized the arguments against this form (many of which also apply to the previous form) as:

- it hides crucial information (e.g. that it is a static method) after the signature, where it is easily missed
- it's easy to miss the transition between a long argument list and a long decorator list
- it's cumbersome to cut and paste a decorator list for reuse, because it starts and ends in the middle of a line

The next form is that the decorator syntax goes inside the method body at the start, in the same place that docstrings currently live:

    def foo(arg1,arg2):
        @classmethod
        pass

    def bar(low,high):
        @accepts(int,int)
        @returns(float)
        pass

The primary objection to this form is that it requires "peeking inside" the method body to determine the decorators. In addition, even though the code is inside the method body, it is not executed when the method is run. Guido felt that docstrings were not a good counter-example, and that it was quite possible that a 'docstring' decorator could help move the docstring to outside the function body.

The final form is a new block that encloses the method's code. For this example, we'll use a 'decorate' keyword, as it makes no sense with the @ syntax:

    decorate:
        classmethod
    def foo(arg1,arg2):
        pass

    decorate:
        accepts(int,int)
        returns(float)
    def bar(low,high):
        pass

This form would result in inconsistent indentation for decorated and undecorated methods. In addition, a decorated method's body would start three indent levels in.

Syntax forms

- @decorator:

      @classmethod
      def foo(arg1,arg2):
          pass

      @accepts(int,int)
      @returns(float)
      def bar(low,high):
          pass

  The major objections against this syntax are that the @ symbol is not currently used in Python (and is used in both IPython and Leo), and that the @ symbol is not meaningful. Another objection is that this "wastes" a currently unused character (from a limited set) on something that is not perceived as a major use.

- |decorator:

      |classmethod
      def foo(arg1,arg2):
          pass

      |accepts(int,int)
      |returns(float)
      def bar(low,high):
          pass

  This is a variant on the @decorator syntax -- it has the advantage that it does not break IPython and Leo.
Its major disadvantage compared to the @ syntax is that the | symbol looks like both a capital I and a lowercase l.

- list syntax:

      [classmethod]
      def foo(arg1,arg2):
          pass

      [accepts(int,int), returns(float)]
      def bar(low,high):
          pass

  The major objection to the list syntax is that it's currently meaningful (when used in the form before the method). It's also lacking any indication that the expression is a decorator.

- list syntax using other brackets (<...>, [[...]], ...):

      <classmethod>
      def foo(arg1,arg2):
          pass

      <accepts(int,int), returns(float)>
      def bar(low,high):
          pass

  None of these alternatives gained much traction. The alternatives which involve square brackets only serve to make it obvious that the decorator construct is not a list. They do nothing to make parsing any easier. The '<...>' alternative presents parsing problems because '<' and '>' already parse as un-paired. They present a further parsing ambiguity because a right angle bracket might be a greater than symbol instead of a closer for the decorators.

- decorate()

  The decorate() proposal was that no new syntax be implemented -- instead, a magic function that used introspection to manipulate the following function would be provided. Both Jp Calderone and Philip Eby produced implementations of functions that did this. Guido was pretty firmly against this -- with no new syntax, the magicness of a function like this is extremely high:

      Using functions with "action-at-a-distance" through sys.settraceback may be okay for an obscure feature that can't be had any other way yet doesn't merit changes to the language, but that's not the situation for decorators. The widely held view here is that decorators need to be added as a syntactic feature to avoid the problems with the postfix notation used in 2.2 and 2.3. Decorators are slated to be an important new language feature and their design needs to be forward-looking, not constrained by what can be implemented in 2.3.

- new keyword (and block)

  This idea was the consensus alternative from comp.lang.python (more on this in Community Consensus below). Robert Brewer wrote up a detailed J2 proposal document outlining the arguments in favor of this form. The initial issues with this form are:

  - It requires a new keyword, and therefore a from __future__ import decorators statement.
  - The choice of keyword is contentious. However, using emerged as the consensus choice, and is used in the proposal and implementation.
  - The keyword/block form produces something that looks like a normal code block, but isn't. Attempts to use statements in this block will cause a syntax error, which may confuse users.

  A few days later, Guido rejected the proposal on two main grounds, firstly:

      ... the syntactic form of an indented block strongly suggests that its contents should be a sequence of statements, but in fact it is not -- only expressions are allowed, and there is an implicit "collecting" of these expressions going on until they can be applied to the subsequent function definition. ...

  and secondly:

      ... the keyword starting the line that heads a block draws a lot of attention to it. This is true for "if", "while", "for", "try", "def" and "class". But the "using" keyword (or any other keyword in its place) doesn't deserve that attention; the emphasis should be on the decorator or decorators inside the suite, since those are the important modifiers to the function definition that follows. ...

  Readers are invited to read the full response.
  http://www.aminus.org/rbre/python/pydec.html
  https://mail.python.org/pipermail/python-dev/2004-September/048518.html

- Other forms

  There are plenty of other variants and proposals on the wiki page.

Why @?

There is some history in Java using @ initially as a marker in Javadoc comments and later in Java 1.5 for annotations, which are similar to Python decorators. The fact that @ was previously unused as a token in Python also means it's clear there is no possibility of such code being parsed by an earlier version of Python, leading to possibly subtle semantic bugs. It also means that ambiguity of what is a decorator and what isn't is removed. That said, @ is still a fairly arbitrary choice. Some have suggested using | instead.

For syntax options which use a list-like syntax (no matter where it appears) to specify the decorators, a few alternatives were proposed: [|...|], *[...]*, and <...>.

Current Implementation, History

Guido asked for a volunteer to implement his preferred syntax, and Mark Russell stepped up and posted a patch to SF. This new syntax was available in 2.4a2::

    @dec2
    @dec1
    def func(arg1, arg2, ...):
        pass

This is equivalent to::

    def func(arg1, arg2, ...):
        pass
    func = dec2(dec1(func))

though without the intermediate creation of a variable named func.
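To see the application order in action, here is a small runnable illustration (the decorator bodies are hypothetical, written only for this sketch; the print calls simply trace the order in which the decorators run)::

    def dec1(func):
        print("applying dec1")
        return func

    def dec2(func):
        print("applying dec2")
        return func

    @dec2
    @dec1
    def func(arg1, arg2):
        return arg1 + arg2

    # Prints "applying dec1" before "applying dec2": the decorator
    # closest to the def is applied first, exactly as in
    # func = dec2(dec1(func)).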
The version implemented in 2.4a2 allowed multiple @decorator clauses on a single line. In 2.4a3, this was tightened up to allow only one decorator per line.

A previous patch from Michael Hudson which implements the list-after-def syntax is also still kicking around.

After 2.4a2 was released, in response to community reaction, Guido stated that he'd re-examine a community proposal, if the community could come up with a community consensus, a decent proposal, and an implementation. After an amazing number of posts, collecting a vast number of alternatives in the Python wiki, a community consensus emerged (below). Guido subsequently rejected this alternate form, but added:

    In Python 2.4a3 (to be released this Thursday), everything remains as currently in CVS. For 2.4b1, I will consider a change of @ to some other single character, even though I think that @ has the advantage of being the same character used by a similar feature in Java. It's been argued that it's not quite the same, since @ in Java is used for attributes that don't change semantics. But Python's dynamic nature makes that its syntactic elements never mean quite the same thing as similar constructs in other languages, and there is definitely significant overlap. Regarding the impact on 3rd party tools: IPython's author doesn't think there's going to be much impact; Leo's author has said that Leo will survive (although it will cause him and his users some transitional pain). I actually expect that picking a character that's already used elsewhere in Python's syntax might be harder for external tools to adapt to, since parsing will have to be more subtle in that case. But I'm frankly undecided, so there's some wiggle room here. I don't want to consider further syntactic alternatives at this point: the buck has to stop at some point, everyone has had their say, and the show must go on.

Community Consensus

This section documents the rejected J2 syntax, and is included for historical completeness.

The consensus that emerged on comp.lang.python was the proposed J2 syntax (the "J2" was how it was referenced on the PythonDecorators wiki page): the new keyword using prefixing a block of decorators before the def statement. For example::

    using:
        classmethod
        synchronized(lock)
    def func(cls):
        pass

The main arguments for this syntax fall under the "readability counts" doctrine. In brief, they are:

- A suite is better than multiple @lines. The using keyword and block transforms the single-block def statement into a multiple-block compound construct, akin to try/finally and others.
- A keyword is better than punctuation for a new token. A keyword matches the existing use of tokens. No new token category is necessary. A keyword distinguishes Python decorators from Java annotations and .Net attributes, which are significantly different beasts.

Robert Brewer wrote a detailed proposal for this form, and Michael Sparks produced a patch.

As noted previously, Guido rejected this form, outlining his problems with it in a message to python-dev and comp.lang.python.

Examples

Much of the discussion on comp.lang.python and the python-dev mailing list focuses on the use of decorators as a cleaner way to use the staticmethod() and classmethod() builtins. This capability is much more powerful than that. This section presents some examples of use.

1. Define a function to be executed at exit. Note that the function isn't actually "wrapped" in the usual sense::

       def onexit(f):
           import atexit
           atexit.register(f)
           return f

       @onexit
       def func():
           ...

   Note that this example is probably not suitable for real usage, but is for example purposes only.

2. Define a class with a singleton instance. Note that once the class disappears enterprising programmers would have to be more creative to create more instances. (From Shane Hathaway on python-dev.)::

       def singleton(cls):
           instances = {}
           def getinstance():
               if cls not in instances:
                   instances[cls] = cls()
               return instances[cls]
           return getinstance

       @singleton
       class MyClass:
           ...

3. Add attributes to a function. (Based on an example posted by Anders Munch on python-dev.)::

       def attrs(**kwds):
           def decorate(f):
               for k in kwds:
                   setattr(f, k, kwds[k])
               return f
           return decorate

       @attrs(versionadded="2.2",
              author="Guido van Rossum")
       def mymethod(f):
           ...

4. Enforce function argument and return types. Note that this copies the func_name attribute from the old to the new function. func_name was made writable in Python 2.4a3::

       def accepts(*types):
           def check_accepts(f):
               assert len(types) == f.func_code.co_argcount
               def new_f(*args, **kwds):
                   for (a, t) in zip(args, types):
                       assert isinstance(a, t), \
                              "arg %r does not match %s" % (a, t)
                   return f(*args, **kwds)
               new_f.func_name = f.func_name
               return new_f
           return check_accepts

       def returns(rtype):
           def check_returns(f):
               def new_f(*args, **kwds):
                   result = f(*args, **kwds)
                   assert isinstance(result, rtype), \
                          "return value %r does not match %s" % (result, rtype)
                   return result
               new_f.func_name = f.func_name
               return new_f
           return check_returns

       @accepts(int, (int, float))
       @returns((int, float))
       def func(arg1, arg2):
           return arg1 * arg2

5. Declare that a class implements a particular (set of) interface(s). This is from a posting by Bob Ippolito on python-dev based on experience with PyProtocols::

       def provides(*interfaces):
           """
           An actual, working, implementation of provides for
           the current implementation of PyProtocols. Not
           particularly important for the PEP text.
""" def provides(typ): declareImplementation(typ, instancesProvide=interfaces) return typ return provides class IBar(Interface): """Declare something about IBar here""" @provides(IBar) class Foo(object): """Implement something here...""" Of course, all these examples are possible today, though without syntactic support. (No longer) Open Issues 1. It's not yet certain that class decorators will be incorporated into the language at a future point. Guido expressed skepticism about the concept, but various people have made some strong arguments (search for PEP 318 -- posting draft) on their behalf in python-dev. It's exceedingly unlikely that class decorators will be in Python 2.4. https://mail.python.org/pipermail/python-dev/2004-March/thread.html PEP 3129 proposes to add class decorators as of Python 2.6. 2. The choice of the @ character will be re-examined before Python 2.4b1. In the end, the @ character was kept. Copyright This document has been placed in the public domain. Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 End:
PEP: 730
Title: Adding iOS as a supported platform
Author: Russell Keith-Magee <[email protected]>
Sponsor: Ned Deily <[email protected]>
Discussions-To: https://discuss.python.org/t/pep730-adding-ios-as-a-supported-platform/35854
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 09-Oct-2023
Python-Version: 3.13
Resolution: https://discuss.python.org/t/pep-730-adding-ios-as-a-supported-platform/35854/66

Abstract

This PEP proposes adding iOS as a supported platform in CPython. The initial goal is to achieve Tier 3 support for Python 3.13. This PEP describes the technical aspects of the changes that are required to support iOS. It also describes the project management concerns related to adoption of iOS as a Tier 3 platform.

Motivation

Over the last 15 years, mobile platforms have become increasingly important parts of the computing landscape. iOS is one of two operating systems that control the vast majority of these devices. However, there is no official support for iOS in CPython.

The BeeWare Project and Kivy have both supported iOS for almost 10 years. This support has been able to generate applications that have been accepted for publication in the iOS App Store. This demonstrates the technical feasibility of iOS support.

It is important for the future of Python as a language that it is able to be used on any hardware or OS that has widespread adoption. If Python cannot be used on a platform that has widespread use, adoption of the language will be impacted as potential users will adopt other languages that do provide support for these platforms.

Rationale

Development landscape

iOS provides a single API, but 2 distinct ABIs - iphoneos (physical devices), and iphonesimulator. Each of these ABIs can be provided on multiple CPU architectures. At time of writing, Apple officially supports arm64 on the device ABI, and arm64 and x86_64 on the simulator ABI.

As with macOS, iOS supports the creation of "fat" binaries that contain multiple CPU architectures. However, fat binaries cannot span ABIs. That is, it is possible to have a fat simulator binary, and a fat device binary, but it is not possible to create a single fat "iOS" binary that covers both simulator and device needs. To support distribution of a single development artefact, Apple uses an "XCframework" structure - a wrapper around multiple ABIs that implement a common API.

iOS runs on a Darwin kernel, similar to macOS. However, there is a need to differentiate between macOS and iOS at an implementation level, as there are significant platform differences between iOS and macOS. iOS code is compiled for compatibility against a minimum iOS version.

Apple frequently refers to "iPadOS" in their marketing material. However, from a development perspective, there is no discernible difference between iPadOS and iOS. A binary that has been compiled for the iphoneos or iphonesimulator ABIs can be deployed on iPad.

Other Apple platforms, such as tvOS, watchOS, and visionOS, use different ABIs, and are not covered by this PEP.

POSIX compliance

iOS is broadly a POSIX platform. However, similar to WASI/Emscripten, there are POSIX APIs that exist on iOS, but cannot be used; and POSIX APIs that don't exist at all.

Most notable of these is the fact that iOS does not provide any form of multiprocess support. fork and spawn both exist in the iOS API; however, if they are invoked, the invoking iOS process stops, and the new process doesn't start.

Unlike WASI/Emscripten, threading is supported on iOS.
There are also significant limits to socket handling. Due to process sandboxing, there is no availability of interprocess communication via socket. However, sockets for network communication are available.

Dynamic libraries

The iOS App Store guidelines allow apps to be written in languages other than Objective C or Swift. However, they have very strict guidelines about the structure of apps that are submitted for distribution.

iOS apps can use dynamically loaded libraries; however, there are very strict requirements on how dynamically loaded content is packaged for use on iOS:

- Dynamic binary content must be compiled as dynamic libraries, not shared objects or binary bundles.
- They must be packaged in the app bundle as Frameworks.
- Each Framework can only contain a single dynamic library.
- The Framework must be contained in the iOS App's Frameworks folder.
- A Framework may not contain any non-library content.

This imposes some constraints on the operation of CPython. It is not possible to store binary modules in the lib-dynload and/or site-packages folders; they must be stored in the app's Frameworks folder, with each module wrapped in a Framework. This also means that the common assumption that a Python module can construct the location of a binary module by using the __file__ attribute of the Python module no longer holds.

As with macOS, compiling a binary module that is accessible from a statically-linked build of Python requires the use of the --undefined dynamic_lookup option to avoid linking libpython3.x into every binary module. However, on iOS, this compiler flag raises a deprecation warning when it is used. A warning from this flag has been observed on macOS as well - however, responses from Apple staff suggest that they do not intend to break the CPython ecosystem by removing this option. As Python does not currently have a notable presence on iOS, it is difficult to judge whether iOS usage of this flag would fall under the same umbrella.

Console and interactive usage

Distribution of a traditional CPython REPL or interactive "python.exe" should not be considered a goal of this work.

Mobile devices (including iOS) do not provide a TTY-style console. They do not provide stdin, stdout or stderr. iOS provides a system log, and it is possible to install a redirection so that all stdout and stderr content is redirected to the system log; but there is no analog for stdin.

In addition, iOS places restrictions on downloading additional code at runtime (as this behavior would be functionally indistinguishable from trying to work around App Store review). As a result, a traditional "create a virtual environment and pip install" development experience will not be viable on iOS.

It is possible to build a native iOS application that provides a REPL interface. This would be closer to an IDLE-style user experience; however, Tkinter cannot be used on iOS, so any such app would require a ground-up rewrite. The iOS app store already contains several examples of apps in this category (e.g., Pythonista and Pyto). The focus of this work would be to provide an embedded distribution that IDE-style native interfaces could utilize, not a user-facing "app" interface to iOS on Python.

Specification

Platform identification

sys

sys.platform will identify as "ios" on both simulator and physical devices.
sys.implementation._multiarch will describe the ABI and CPU architecture:

- "arm64-iphoneos" for ARM64 devices
- "arm64-iphonesimulator" for ARM64 simulators
- "x86_64-iphonesimulator" for x86_64 simulators

platform

platform will be modified to support returning iOS-specific details. Most of the values returned by the platform module will match those returned by os.uname(), with the exception of:

- platform.system() - "iOS" or "iPadOS" (depending on the hardware in use), instead of "Darwin"
- platform.release() - the iOS version number, as a string (e.g., "16.6.1"), instead of the Darwin kernel version.

In addition, a platform.ios_ver() method will be added. This mirrors platform.mac_ver(), which can be used to provide macOS version information. ios_ver() will return a namedtuple that contains the following:

- system - the OS name (iOS or iPadOS, depending on hardware)
- release - the iOS version, as a string (e.g., "16.6.1").
- model - the model identifier of the device, as a string (e.g., "iPhone13,2"). On simulators, this will return "iPhone" or "iPad", depending on the simulator device.
- is_simulator - a boolean indicating if the device is a simulator.

os

os.uname() will return the raw result of a POSIX uname() call. This will result in the following values:

- sysname - "Darwin"
- release - The Darwin kernel version (e.g., "22.6.0")

This approach treats the os module as a "raw" interface to system APIs, and platform as a higher-level API providing more generally useful values.

sysconfig

The sysconfig module will use the minimum iOS version as part of sysconfig.get_platform() (e.g., "ios-12.0-arm64-iphoneos"). The sysconfigdata_name and Config makefile will follow the same patterns as existing platforms (using sys.platform, sys.implementation._multiarch etc.) to construct identifiers.
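Taken together, this gives user code a straightforward way to detect and describe an iOS host. The following is a minimal sketch assuming the behaviour specified above; it is illustrative, not part of the PEP::

    import platform
    import sys

    # Sketch only: assumes the sys.platform value and the
    # platform.ios_ver() namedtuple described in this specification.
    if sys.platform == "ios":
        ver = platform.ios_ver()
        kind = "simulator" if ver.is_simulator else "device"
        print(f"{ver.system} {ver.release} on a {ver.model} ({kind})")
    else:
        print(f"Not an iOS host: {sys.platform}")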
Subprocess support

iOS will leverage the pattern for disabling subprocesses established by WASI/Emscripten. The subprocess module will raise an exception if an attempt is made to start a subprocess, and os.fork and os.spawn calls will raise an OSError.

Dynamic module loading

To accommodate iOS dynamic loading, the importlib bootstrap will be extended to add a metapath finder that can convert a request for a Python binary module into a Framework location. This finder will only be installed if sys.platform == "ios". This finder will convert a Python module name (e.g., foo.bar._whiz) into a unique Framework name by using the full module name as the framework name (i.e., foo.bar._whiz.framework). A framework is a directory; the finder will look for a binary named foo.bar._whiz in that directory.

Compilation

The only binary format that will be supported is a dynamically-linkable libpython3.x.dylib, packaged in an iOS-compatible framework format. While the --undefined dynamic_lookup compiler option currently works, the long-term viability of the option cannot be guaranteed. Rather than rely on a compiler flag with an uncertain future, binary modules on iOS will be linked with libpython3.x.dylib.

This means iOS binary modules will not be loadable by an executable that has been statically linked against libpython3.x.a. Therefore, a static libpython3.x.a iOS library will not be supported. This is the same pattern used by CPython on Windows.

Building CPython for iOS requires the use of the cross-platform tooling in CPython's configure build system. A single configure/make/make install pass will produce a Python.framework artefact that can be used on a single ABI and architecture.

Additional tooling will be required to merge the Python.framework builds for multiple architectures into a single "fat" library. Tooling will also be required to merge multiple ABIs into the XCframework format that Apple uses to distribute multiple frameworks for different ABIs in a single bundle.

An Xcode project will be provided for the purpose of running the CPython test suite. Tooling will be provided to automate the process of compiling the test suite binary, starting the simulator, installing the test suite, and executing it.

Distribution

Adding iOS as a Tier 3 platform only requires adding support for compiling an iOS-compatible build from an unpatched CPython code checkout. It does not require production of officially distributed iOS artefacts for use by end-users.

If/when iOS is updated to Tier 2 or 1 support, the tooling used to generate an XCframework package could be used to produce an iOS distribution artefact. This could then be distributed as an "embedded distribution" analogous to the Windows embedded distribution, or as a CocoaPod or Swift package that could be added to an Xcode project.

CI resources

Anaconda has offered to provide physical hardware to run iOS buildbots.

GitHub Actions is able to host iOS simulators on their macOS machines, and the iOS simulator can be controlled by scripting environments. The free tier currently only provides x86_64 macOS machines; however, ARM64 runners recently became available on paid plans (https://github.blog/2023-10-02-introducing-the-new-apple-silicon-powered-m1-macos-larger-runner-for-github-actions/). However, in order to avoid exhausting macOS runner resources, a GitHub Actions run for iOS will not be added as part of the standard CI configuration.

Packaging

iOS will not provide a "universal" wheel format. Instead, wheels will be provided for each ABI-arch combination. iOS wheels will use tags:

- ios_12_0_arm64_iphoneos
- ios_12_0_arm64_iphonesimulator
- ios_12_0_x86_64_iphonesimulator

In these tags, "12.0" is the minimum supported iOS version. As with macOS, the tag will incorporate the minimum iOS version that is selected when the wheel is compiled; a wheel compiled with a minimum iOS version of 15.0 would use the ios_15_0_* tags. At time of writing, iOS 12.0 exposes most significant iOS features, while reaching near 100% of devices; this will be used as a floor for iOS version matching.

These wheels can include binary modules in-situ (i.e., co-located with the Python source, in the same way as wheels for a desktop platform); however, they will need to be post-processed, as binary modules need to be moved into the "Frameworks" location for distribution. This can be automated with an Xcode build step.

PEP 11 Update

PEP 11 will be updated to include two of the iOS ABIs:

- arm64-apple-ios
- arm64-apple-ios-simulator

Ned Deily will serve as the initial core team contact for these ABIs.

The x86_64-apple-ios-simulator target will be supported on a best-effort basis, but will not be targeted for tier 3 support. This is due to the impending deprecation of x86_64 as a simulation platform, combined with the difficulty of commissioning x86_64 macOS hardware at this time.

Backwards Compatibility

Adding a new platform does not introduce any backwards compatibility concerns to CPython itself. There may be some backwards compatibility implications on the projects that have historically provided CPython support (i.e., BeeWare and Kivy) if the final form of any CPython patches don't align with the patches they have historically used.
Although not strictly a backwards compatibility issue, there is a platform adoption consideration. Although CPython itself may support iOS, if it is unclear how to produce iOS-compatible wheels, and prominent libraries like cryptography, Pillow, and NumPy don't provide iOS wheels, the ability of the community to adopt Python on iOS will be limited. Therefore, it will be necessary to clearly document how projects can add iOS builds to their CI and release tooling. Adding iOS support to tools like crossenv and cibuildwheel may be one way to achieve this.

Security Implications

Adding iOS as a new platform does not add any security implications.

How to Teach This

The education needs related to this PEP mostly relate to how end-users can add iOS support to their own Xcode projects. This can be accomplished with documentation and tutorials on that process. The need for this documentation will increase if/when support is raised from Tier 3 to Tier 2 or 1; however, this transition should also be accompanied with simplified deployment artefacts (such as a CocoaPod or Swift package) that are integrated with Xcode development.

Reference Implementation

The BeeWare Python-Apple-support repository contains a reference patch and build tooling to compile a distributable artefact. Briefcase provides a reference implementation of code to execute test suites on iOS simulators. The Toga Testbed is an example of a test suite that is executed on the iOS simulator using GitHub Actions.

Rejected Ideas

Simulator identification

Earlier versions of this PEP suggested the inclusion of a sys.implementation._simulator attribute to identify when code is running on device, or on a simulator. This was rejected due to the use of a protected name for a public API, plus the pollution of the sys namespace with an iOS-specific detail.

Another proposal during discussion was to include a generic platform.is_emulator() API that could be implemented by any platform - for example, to differentiate running x86_64 code on ARM64 hardware, or when running in QEMU or other virtualization methods. This was rejected on the basis that it wasn't clear what a consistent interpretation of "emulator" would be, or how an emulator would be detected outside of the iOS case. The decision was made to keep this detail iOS-specific, and include it on the platform.ios_ver() API.

GNU compiler triples

autoconf requires the use of a GNU compiler triple to identify build and host platforms. However, the autoconf toolchain doesn't provide native support for iOS simulators, so we are left with the task of working out how to squeeze iOS hardware into GNU's naming regimen.

This can be done (with some patching of config.sub), but it leads to 2 major sources of naming inconsistency:

- arm64 vs aarch64 as an identifier of 64-bit ARM hardware; and
- What identifier is used to represent simulators.

Apple's own tools use arm64 as the architecture, but appear to be tolerant of aarch64 in some cases. The device platform is identified as iphoneos and iphonesimulator.

Rust toolchains use aarch64 as the architecture, and use aarch64-apple-ios and aarch64-apple-ios-sim to identify the device platform; however, they use x86_64-apple-ios to represent iOS simulators on x86_64 hardware.

The decision was made to use arm64-apple-ios and arm64-apple-ios-simulator because:

1. The autoconf toolchain already contains support for ios as a platform in config.sub; it's only the simulator that doesn't have a representation.
2. The third part of the host triple is used as sys.platform.
3. When Apple's own tools reference CPU architecture, they use arm64, and the GNU tooling usage of the architecture isn't visible outside the build process.
4. When Apple's own tools reference simulator status independent of the OS (e.g., in the naming of Swift submodules), they use a -simulator suffix.
5. While some iOS packages will use Rust, all iOS packages will use Apple's tooling.

The initially accepted version of this document used the aarch64 form as the PEP 11 identifier; this was corrected during finalization.

"Universal" wheel format

macOS currently supports 2 CPU architectures. To aid the end-user development experience, Python defines a "universal2" wheel format that incorporates both x86_64 and ARM64 binaries. It would be conceptually possible to offer an analogous "universal" iOS wheel format. However, this PEP does not use this approach, for 2 reasons.

Firstly, the experience on macOS, especially in the numerical Python ecosystem, has been that universal wheels can be exceedingly difficult to accommodate. While native macOS libraries maintain strong multi-platform support, and Python itself has been updated, the vast majority of upstream non-Python libraries do not provide multi-architecture build support. As a result, compiling universal wheels inevitably requires multiple compilation passes, and complex decisions over how to distribute header files for different architectures. As a result of this complexity, many popular projects (including NumPy and Pillow) do not provide universal wheels at all, instead providing separate ARM64 and x86_64 wheels.

Secondly, historical experience is that iOS would require a much more fluid "universal" definition. In the last 10 years, there have been at least 5 different possible interpretations of "universal" that would apply to iOS, including various combinations of armv6, armv7, armv7s, arm64, x86 and x86_64 architectures, on device and simulator. If defined right now, "universal-iOS" would likely include x86_64 and arm64 on simulator, and arm64 on device; however, the pending deprecation of x86_64 hardware would add another interpretation; and there may be a need to add arm64e as a new device architecture in the future. Specifying iOS wheels as single-platform-only means the Python core team can avoid an ongoing standardization discussion about the updated "universal" formats.

It also means wheel publishers are able to make per-project decisions over which platforms are feasible to support. For example, a project may choose to drop x86_64 support, or adopt a new architecture earlier than other parts of the Python ecosystem. Using platform-specific wheels means this decision can be left to individual package publishers.

This decision comes at a cost of making deployment more complicated. However, deployment on iOS is already a complicated process that is best aided by tools. At present, no binary merging is required, as there is only one on-device architecture, and simulator binaries are not considered to be distributable artefacts, so only one architecture is needed to build an app for a simulator.

Supporting static builds

While the long-term viability of the --undefined dynamic_lookup option cannot be guaranteed, the option does exist, and it works. One option would be to ignore the deprecation warning, and hope that Apple either reverses the deprecation decision, or never finalizes the deprecation. Given that Apple's decision-making process is entirely opaque, this would be, at best, a risky option.
When combined with the fact that the broader iOS development ecosystem encourages the use of frameworks, there are no legacy uses of a static library to consider, and the only benefit to a statically-linked iOS libpython3.x.a is a very slightly reduced app startup time, omitting support for static builds of libpython3.x seems a reasonable compromise.

It is worth noting that there has been some discussion on an alternate approach to linking on macOS that would remove the need for the --undefined dynamic_lookup option, although discussion on this approach appears to have stalled due to complications in implementation. If those complications were to be overcome, it is highly likely that the same approach could be used on iOS, which would make a statically linked libpython3.x.a plausible.

The decision to link binary modules against libpython3.x.dylib would complicate the introduction of static libpython3.x.a builds in the future, as the process of moving to a different binary module linking approach would require a clear way to differentiate "dynamically-linked" iOS binary modules from "static-compatible" iOS binary modules. However, given the lack of tangible benefits of a static libpython3.x.a, it seems unlikely that there will be any requirement to make this change.

Interactive/REPL mode

A traditional python.exe command line experience isn't really viable on mobile devices, because mobile devices don't have a command line. iOS apps don't have a stdout, stderr or stdin; and while you can redirect stdout and stderr to the system log, there's no source for stdin that exists that doesn't also involve building a very specific user-facing app that would be closer to an IDLE-style IDE experience. Therefore, the decision was made to only focus on "embedded mode" as a target for mobile distribution.

x86_64 simulator support

Apple no longer sells x86_64 hardware. As a result, commissioning an x86_64 buildbot can be difficult. It is possible to run macOS binaries in x86_64 compatibility mode on ARM64 hardware; however, this isn't ideal for testing purposes. Therefore, the x86_64 simulator (x86_64-apple-ios-simulator) will not be added as a Tier 3 target. It is highly likely that iOS support will work on x86_64 without any modification; this only impacts the official Tier 3 status.

On-device testing

CI testing on simulators can be accommodated reasonably easily. On-device testing is much harder, as availability of device farms that could be configured to provide Buildbots or GitHub Actions runners is limited.

However, on-device testing may not be necessary. As a data point - Apple's Xcode Cloud solution doesn't provide on-device testing. They rely on the fact that the API is consistent between device and simulator, and ARM64 simulator testing is sufficient to reveal CPU-specific issues.

Ordering of _multiarch tags

The initially accepted version of this document used <platform>-<arch> ordering (e.g., iphoneos-arm64) for sys.implementation._multiarch (and related values, such as wheel tags). The final merged version uses the <arch>-<platform> ordering (e.g., arm64-iphoneos). This is for consistency with compiler triples on other platforms (especially Linux), which specify the architecture before the operating system.

Values returned by platform.ios_ver()

The initially accepted version of this document didn't include a system identifier. This was added during the implementation phase to support the implementation of platform.system().
The initially accepted version of this document also described a min_release value that would be returned in the ios_ver() result. The final version omits the min_release value, as it is not significant at runtime; it only impacts binary compatibility. The minimum version is included in the value returned by sysconfig.get_platform(), as this is used to define wheel (and other binary) compatibility.

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 469
Title: Migration of dict iteration code to Python 3
Author: Alyssa Coghlan <[email protected]>
Status: Withdrawn
Type: Standards Track
Content-Type: text/x-rst
Created: 18-Apr-2014
Python-Version: 3.5
Post-History: 18-Apr-2014, 21-Apr-2014

Abstract

For Python 3, PEP 3106 changed the design of the dict builtin and the mapping API in general to replace the separate list based and iterator based APIs in Python 2 with a merged, memory efficient set and multiset view based API. This new style of dict iteration was also added to the Python 2.7 dict type as a new set of iteration methods.

This means that there are now 3 different kinds of dict iteration that may need to be migrated to Python 3 when an application makes the transition:

- Lists as mutable snapshots: d.items() -> list(d.items())
- Iterator objects: d.iteritems() -> iter(d.items())
- Set based dynamic views: d.viewitems() -> d.items()

There is currently no widely agreed best practice on how to reliably convert all Python 2 dict iteration code to the common subset of Python 2 and 3, especially when test coverage of the ported code is limited. This PEP reviews the various ways the Python 2 iteration APIs may be accessed, and looks at the available options for migrating that code to Python 3 by way of the common subset of Python 2.6+ and Python 3.0+.

The PEP also considers the question of whether or not there are any additions that may be worth making to Python 3.5 that may ease the transition process for application code that doesn't need to worry about supporting earlier versions when eventually making the leap to Python 3.

PEP Withdrawal

In writing the second draft of this PEP, I came to the conclusion that the readability of hybrid Python 2/3 mapping code can actually be best enhanced by better helper functions rather than by making changes to Python 3.5+. The main value I now see in this PEP is as a clear record of the recommended approaches to migrating mapping iteration code from Python 2 to Python 3, as well as suggesting ways to keep things readable and maintainable when writing hybrid code that supports both versions.

Notably, I recommend that hybrid code avoid calling mapping iteration methods directly, and instead rely on builtin functions where possible, and some additional helper functions for cases that would be a simple combination of a builtin and a mapping method in pure Python 3 code, but need to be handled slightly differently to get the exact same semantics in Python 2.

Static code checkers like pylint could potentially be extended with an optional warning regarding direct use of the mapping iteration methods in a hybrid code base.

Mapping iteration models

Python 2.7 provides three different sets of methods to extract the keys, values and items from a dict instance, accounting for 9 out of the 18 public methods of the dict type. In Python 3, this has been rationalised to just 3 out of 11 public methods (as the has_key method has also been removed).

Lists as mutable snapshots

This is the oldest of the three styles of dict iteration, and hence the one implemented by the d.keys(), d.values() and d.items() methods in Python 2. These methods all return lists that are snapshots of the state of the mapping at the time the method was called.
This has a few consequences:

- the original object can be mutated freely without affecting iteration over the snapshot
- the snapshot can be modified independently of the original object
- the snapshot consumes memory proportional to the size of the original mapping

The semantic equivalents of these operations in Python 3 are list(d.keys()), list(d.values()) and list(d.items()).

Iterator objects

In Python 2.2, dict objects gained support for the then-new iterator protocol, allowing direct iteration over the keys stored in the dictionary, thus avoiding the need to build a list just to iterate over the dictionary contents one entry at a time. iter(d) provides direct access to the iterator object for the keys.

Python 2 also provides a d.iterkeys() method that is essentially synonymous with iter(d), along with d.itervalues() and d.iteritems() methods.

These iterators provide live views of the underlying object, and hence may fail if the set of keys in the underlying object is changed during iteration::

    >>> d = dict(a=1)
    >>> for k in d:
    ...     del d[k]
    ...
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    RuntimeError: dictionary changed size during iteration

As iterators, iteration over these objects is also a one-time operation: once the iterator is exhausted, you have to go back to the original mapping in order to iterate again.

In Python 3, direct iteration over mappings works the same way as it does in Python 2. There are no method based equivalents - the semantic equivalents of d.itervalues() and d.iteritems() in Python 3 are iter(d.values()) and iter(d.items()).

The six and future.utils compatibility modules also both provide iterkeys(), itervalues() and iteritems() helper functions that provide efficient iterator semantics in both Python 2 and 3.

Set based dynamic views

The model that is provided in Python 3 as a method based API is that of set based dynamic views (technically multisets in the case of the values() view).

In Python 3, the objects returned by d.keys(), d.values() and d.items() provide a live view of the current state of the underlying object, rather than taking a full snapshot of the current state as they did in Python 2. This change is safe in many circumstances, but does mean that, as with the direct iteration API, it is necessary to avoid adding or removing keys during iteration, in order to avoid encountering the following error::

    >>> d = dict(a=1)
    >>> for k, v in d.items():
    ...     del d[k]
    ...
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    RuntimeError: dictionary changed size during iteration

Unlike the iteration API, these objects are iterables, rather than iterators: you can iterate over them multiple times, and each time they will iterate over the entire underlying mapping.

These semantics are also available in Python 2.7 as the d.viewkeys(), d.viewvalues() and d.viewitems() methods.

The future.utils compatibility module also provides viewkeys(), viewvalues() and viewitems() helper functions when running on Python 2.7 or Python 3.x.
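To make the snapshot/view distinction concrete, here is a short Python 3 illustration (not from the PEP itself)::

    >>> d = dict(a=1)
    >>> snapshot = list(d.keys())   # mutable snapshot semantics
    >>> view = d.keys()             # dynamic view semantics
    >>> d["b"] = 2
    >>> snapshot                    # unaffected by the later change
    ['a']
    >>> sorted(view)                # reflects the current mapping state
    ['a', 'b']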
Migrating directly to Python 3

The 2to3 migration tool handles direct migrations to Python 3 in accordance with the semantic equivalents described above:

- d.keys() -> list(d.keys())
- d.values() -> list(d.values())
- d.items() -> list(d.items())
- d.iterkeys() -> iter(d.keys())
- d.itervalues() -> iter(d.values())
- d.iteritems() -> iter(d.items())
- d.viewkeys() -> d.keys()
- d.viewvalues() -> d.values()
- d.viewitems() -> d.items()

Rather than 9 distinct mapping methods for iteration, there are now only the 3 view methods, which combine in straightforward ways with the two relevant builtin functions to cover all of the behaviours that are available as dict methods in Python 2.7.

Note that in many cases d.keys() can be replaced by just d, but the 2to3 migration tool doesn't attempt that replacement. The 2to3 migration tool also does not provide any automatic assistance for migrating references to these objects as bound or unbound methods - it only automates conversions where the API is called immediately.

Migrating to the common subset of Python 2 and 3

When migrating to the common subset of Python 2 and 3, the above transformations are not generally appropriate, as they all either result in the creation of a redundant list in Python 2, have unexpectedly different semantics in at least some cases, or both.

Since most code running in the common subset of Python 2 and 3 supports at least as far back as Python 2.6, the currently recommended approach to conversion of mapping iteration operations depends on two helper functions for efficient iteration over mapping values and mapping item tuples:

- d.keys() -> list(d)
- d.values() -> list(itervalues(d))
- d.items() -> list(iteritems(d))
- d.iterkeys() -> iter(d)
- d.itervalues() -> itervalues(d)
- d.iteritems() -> iteritems(d)

Both six and future.utils provide appropriate definitions of itervalues() and iteritems() (along with essentially redundant definitions of iterkeys()). Creating your own definitions of these functions in a custom compatibility module is also relatively straightforward::

    try:
        dict.iteritems
    except AttributeError:
        # Python 3
        def itervalues(d):
            return iter(d.values())
        def iteritems(d):
            return iter(d.items())
    else:
        # Python 2
        def itervalues(d):
            return d.itervalues()
        def iteritems(d):
            return d.iteritems()
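For instance, hybrid code would then use these helpers as follows (a small illustration, not part of the original PEP)::

    d = {"a": 1, "b": 2}

    # Efficient iteration on both Python 2 and Python 3, without
    # invoking the mapping iteration methods directly.
    pairs = sorted(iteritems(d))
    total = sum(itervalues(d))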
The greatest loss of readability currently arises when converting code that actually needs the list based snapshots that were the default in Python 2. This readability loss could likely be mitigated by also providing listvalues and listitems helper functions, allowing the affected conversions to be simplified to:

- d.values() -> listvalues(d)
- d.items() -> listitems(d)

The corresponding compatibility function definitions are as straightforward as their iterator counterparts::

    try:
        dict.iteritems
    except AttributeError:
        # Python 3
        def listvalues(d):
            return list(d.values())
        def listitems(d):
            return list(d.items())
    else:
        # Python 2
        def listvalues(d):
            return d.values()
        def listitems(d):
            return d.items()

With that expanded set of compatibility functions, Python 2 code would then be converted to "idiomatic" hybrid 2/3 code as:

- d.keys() -> list(d)
- d.values() -> listvalues(d)
- d.items() -> listitems(d)
- d.iterkeys() -> iter(d)
- d.itervalues() -> itervalues(d)
- d.iteritems() -> iteritems(d)

This compares well for readability with the idiomatic pure Python 3 code that uses the mapping methods and builtins directly:

- d.keys() -> list(d)
- d.values() -> list(d.values())
- d.items() -> list(d.items())
- d.iterkeys() -> iter(d)
- d.itervalues() -> iter(d.values())
- d.iteritems() -> iter(d.items())

It's also notable that when using this approach, hybrid code would never invoke the mapping methods directly: it would always invoke either a builtin or helper function instead, in order to ensure the exact same semantics on both Python 2 and 3.

Migrating from Python 3 to the common subset with Python 2.7

While the majority of migrations are currently from Python 2 either directly to Python 3 or to the common subset of Python 2 and Python 3, there are also some migrations of newer projects that start in Python 3 and then later add Python 2 support, either due to user demand, or to gain access to Python 2 libraries that are not yet available in Python 3 (and porting them to Python 3 or creating a Python 3 compatible replacement is not a trivial exercise).

In these cases, Python 2.7 compatibility is often sufficient, and the 2.7+ only view based helper functions provided by future.utils allow the bare accesses to the Python 3 mapping view methods to be replaced with code that is compatible with both Python 2.7 and Python 3 (note, this is the only migration chart in the PEP that has Python 3 code on the left of the conversion):

- d.keys() -> viewkeys(d)
- d.values() -> viewvalues(d)
- d.items() -> viewitems(d)
- list(d.keys()) -> list(d)
- list(d.values()) -> listvalues(d)
- list(d.items()) -> listitems(d)
- iter(d.keys()) -> iter(d)
- iter(d.values()) -> itervalues(d)
- iter(d.items()) -> iteritems(d)

As with migrations from Python 2 to the common subset, note that the hybrid code ends up never invoking the mapping methods directly - it only calls builtins and helper methods, with the latter addressing the semantic differences between Python 2 and Python 3.

Possible changes to Python 3.5+

The main proposal put forward to potentially aid migration of existing Python 2 code to Python 3 is the restoration of some or all of the alternate iteration APIs to the Python 3 mapping API.
In particular, the initial draft of this PEP proposed making the following conversions possible when migrating to the common subset of Python 2 and Python 3.5+:

- d.keys() -> list(d)
- d.values() -> list(d.itervalues())
- d.items() -> list(d.iteritems())
- d.iterkeys() -> d.iterkeys()
- d.itervalues() -> d.itervalues()
- d.iteritems() -> d.iteritems()

Possible mitigations of the additional language complexity in Python 3 created by restoring these methods included immediately deprecating them, as well as potentially hiding them from the dir() function (or perhaps even defining a way to make pydoc aware of function deprecations).

However, in the case where the list output is actually desired, the end result of that proposal is actually less readable than an appropriately defined helper function, and the function and method forms of the iterator versions are pretty much equivalent from a readability perspective.

So unless I've missed something critical, readily available listvalues() and listitems() helper functions look like they will improve the readability of hybrid code more than anything we could add back to the Python 3.5+ mapping API, and won't have any long-term impact on the complexity of Python 3 itself.

Discussion

The fact that 5 years into the Python 3 migration we still have users considering the dict API changes a significant barrier to migration suggests that there are problems with previously recommended approaches. This PEP attempts to explore those issues and tries to isolate those cases where previous advice (such as it was) could prove problematic.

My assessment (largely based on feedback from Twisted devs) is that problems are most likely to arise when attempting to use d.keys(), d.values(), and d.items() in hybrid code. While superficially it seems as though there should be cases where it is safe to ignore the semantic differences, in practice, the change from "mutable snapshot" to "dynamic view" is significant enough that it is likely better to just force the use of either list or iterator semantics for hybrid code, and leave the use of the view semantics to pure Python 3 code.

This approach also creates rules that are simple enough and safe enough that it should be possible to automate them in code modernisation scripts that target the common subset of Python 2 and Python 3, just as 2to3 converts them automatically when targeting pure Python 3 code.

Acknowledgements

Thanks to the folks at the Twisted sprint table at PyCon for a very vigorous discussion of this idea (and several other topics), and especially to Hynek Schlawack for acting as a moderator when things got a little too heated :)

Thanks also to JP Calderone and Itamar Turner-Trauring for their email feedback, as well as to the participants in the python-dev review of the initial version of the PEP.

Copyright

This document has been placed in the public domain.
PEP: 249
Title: Python Database API Specification v2.0
Author: Marc-André Lemburg <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Informational
Content-Type: text/x-rst
Created: 12-Apr-1999
Post-History:
Replaces: 248

Introduction

This API has been defined to encourage similarity between the Python modules that are used to access databases. By doing this, we hope to achieve a consistency leading to more easily understood modules, code that is generally more portable across databases, and a broader reach of database connectivity from Python.

Comments and questions about this specification may be directed to the SIG for Database Interfacing with Python. For more information on database interfacing with Python and available packages see the Database Topic Guide.

This document describes the Python Database API Specification 2.0 and a set of common optional extensions. The previous version 1.0 is still available as reference, in PEP 248. Package writers are encouraged to use this version of the specification as basis for new interfaces.

Module Interface

Constructors

Access to the database is made available through connection objects. The module must provide the following constructor for these:

connect( parameters... )
    Constructor for creating a connection to the database. Returns a Connection Object. It takes a number of parameters which are database dependent.[1]

Globals

These module globals must be defined:

apilevel
    String constant stating the supported DB API level. Currently only the strings "1.0" and "2.0" are allowed. If not given, a DB-API 1.0 level interface should be assumed.

threadsafety
    Integer constant stating the level of thread safety the interface supports. Possible values are:

    +--------------+--------------------------------------------------------+
    | threadsafety | Meaning                                                |
    +==============+========================================================+
    | 0            | Threads may not share the module.                      |
    +--------------+--------------------------------------------------------+
    | 1            | Threads may share the module, but not connections.     |
    +--------------+--------------------------------------------------------+
    | 2            | Threads may share the module and connections.          |
    +--------------+--------------------------------------------------------+
    | 3            | Threads may share the module, connections and cursors. |
    +--------------+--------------------------------------------------------+

    Sharing in the above context means that two threads may use a resource without wrapping it using a mutex semaphore to implement resource locking. Note that you cannot always make external resources thread safe by managing access using a mutex: the resource may rely on global variables or other external sources that are beyond your control.

paramstyle
    String constant stating the type of parameter marker formatting expected by the interface. Possible values are[2]:

    paramstyle  Meaning
    ----------  -----------------------------------------------------------
    qmark       Question mark style, e.g. ...WHERE name=?
    numeric     Numeric, positional style, e.g. ...WHERE name=:1
    named       Named style, e.g. ...WHERE name=:name
    format      ANSI C printf format codes, e.g. ...WHERE name=%s
    pyformat    Python extended format codes, e.g. ...WHERE name=%(name)s

Exceptions

The module should make all error information available through these exceptions or subclasses thereof:

Warning
    Exception raised for important warnings like data truncations while inserting, etc. It must be a subclass of the Python Exception class[3][4].
Error
    Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single except statement. Warnings are not considered errors and thus should not use this class as base. It must be a subclass of the Python Exception class[5].

InterfaceError
    Exception raised for errors that are related to the database interface rather than the database itself. It must be a subclass of Error.

DatabaseError
    Exception raised for errors that are related to the database. It must be a subclass of Error.

DataError
    Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range, etc. It must be a subclass of DatabaseError.

OperationalError
    Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, a memory allocation error occurred during processing, etc. It must be a subclass of DatabaseError.

IntegrityError
    Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. It must be a subclass of DatabaseError.

InternalError
    Exception raised when the database encounters an internal error, e.g. the cursor is not valid anymore, the transaction is out of sync, etc. It must be a subclass of DatabaseError.

ProgrammingError
    Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement, wrong number of parameters specified, etc. It must be a subclass of DatabaseError.

NotSupportedError
    Exception raised in case a method or database API was used which is not supported by the database, e.g. requesting a .rollback() on a connection that does not support transactions or has transactions turned off. It must be a subclass of DatabaseError.

This is the exception inheritance layout[6][7]::

    Exception
    |__Warning
    |__Error
       |__InterfaceError
       |__DatabaseError
          |__DataError
          |__OperationalError
          |__IntegrityError
          |__InternalError
          |__ProgrammingError
          |__NotSupportedError

Note: The values of these exceptions are not defined. They should give the user a fairly good idea of what went wrong, though.
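Because Error is the common base class, client code written against any conforming module can route all failures through a single handler. A brief sketch (the module name dbapi is a placeholder for a conforming implementation, not part of this specification)::

    import dbapi  # hypothetical module implementing this specification

    try:
        con = dbapi.connect(database="example")
        cur = con.cursor()
        cur.execute("SELECT 1")
    except dbapi.Error as e:
        # Error is the base of InterfaceError and DatabaseError (and the
        # DatabaseError subclasses), so one handler catches them all.
        print("Database error:", e)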
Connection Objects

Connection objects should respond to the following methods.

Connection methods

.close()
    Close the connection now (rather than whenever .__del__() is called). The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed.

.commit()
    Commit any pending transaction to the database. Note that if the database supports an auto-commit feature, this must be initially off. An interface method may be provided to turn it back on. Database modules that do not support transactions should implement this method with void functionality.

.rollback()
    This method is optional since not all databases provide transaction support.[8]

    In case a database does provide transactions, this method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed.

.cursor()
    Return a new Cursor Object using the connection. If the database does not provide a direct cursor concept, the module will have to emulate cursors using other means to the extent needed by this specification.[9]

Cursor Objects

These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors created from the same connection are not isolated, i.e., any changes done to the database by a cursor are immediately visible by the other cursors. Cursors created from different connections can or can not be isolated, depending on how the transaction support is implemented (see also the connection's .rollback() and .commit() methods).

Cursor Objects should respond to the following methods and attributes.

Cursor attributes

.description
    This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing one result column:

    - name
    - type_code
    - display_size
    - internal_size
    - precision
    - scale
    - null_ok

    The first two items (name and type_code) are mandatory, the other five are optional and are set to None if no meaningful values can be provided.

    This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked via the .execute*() method yet.

    The type_code can be interpreted by comparing it to the Type Objects specified in the section below.

.rowcount
    This read-only attribute specifies the number of rows that the last .execute*() produced (for DQL statements like SELECT) or affected (for DML statements like UPDATE or INSERT).[10]

    The attribute is -1 in case no .execute*() has been performed on the cursor or the rowcount of the last operation cannot be determined by the interface.[11]

    Note: Future versions of the DB API specification could redefine the latter case to have the object return None instead of -1.

Cursor methods

.callproc( procname [, parameters ] )
    (This method is optional since not all databases provide stored procedures.[12])

    Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as modified copy of the input sequence. Input parameters are left untouched, output and input/output parameters replaced with possibly new values.

    The procedure may also provide a result set as output. This must then be made available through the standard .fetch*() methods.

.close()
    Close the cursor now (rather than whenever __del__ is called). The cursor will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the cursor.

.execute(operation [, parameters])
    Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified in a database-specific notation (see the module's paramstyle attribute for details).[13]

    A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times).

    For maximum efficiency when reusing an operation, it is best to use the .setinputsizes() method to specify the parameter types and sizes ahead of time. It is legal for a parameter to not match the predefined information; the implementation should compensate, possibly with a loss of efficiency.
The parameters may also be specified as a list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: .executemany() should be used instead.

Return values are not defined.

.executemany( operation, seq_of_parameters )
Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.

Modules are free to implement this method using multiple calls to the .execute() method or by using array operations to have the database process the sequence as a whole in one call.

Use of this method for an operation which produces one or more result sets constitutes undefined behavior, and the implementation is permitted (but not required) to raise an exception when it detects that a result set has been created by an invocation of the operation.

The same comments as for .execute() also apply accordingly to this method.

Return values are not defined.

.fetchone()
Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.[14]

An Error (or subclass) exception is raised if the previous call to .execute*() did not produce any result set or no call was issued yet.

.fetchmany([size=cursor.arraysize])
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available.

The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned.

An Error (or subclass) exception is raised if the previous call to .execute*() did not produce any result set or no call was issued yet.

Note there are performance considerations involved with the size parameter. For optimal performance, it is usually best to use the .arraysize attribute. If the size parameter is used, then it is best for it to retain the same value from one .fetchmany() call to the next.

.fetchall()
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.

An Error (or subclass) exception is raised if the previous call to .execute*() did not produce any result set or no call was issued yet.

.nextset()
(This method is optional since not all databases support multiple result sets.[15])

This method will make the cursor skip to the next available set, discarding any remaining rows from the current set. If there are no more sets, the method returns None. Otherwise, it returns a true value and subsequent calls to the .fetch*() methods will return rows from the next result set.

An Error (or subclass) exception is raised if the previous call to .execute*() did not produce any result set or no call was issued yet.

.arraysize
This read/write attribute specifies the number of rows to fetch at a time with .fetchmany(). It defaults to 1, meaning to fetch a single row at a time.

Implementations must observe this value with respect to the .fetchmany() method, but are free to interact with the database a single row at a time. It may also be used in the implementation of .executemany().
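A short sketch tying these methods together, again using sqlite3 (whose paramstyle is 'qmark'; the table and column names are invented):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE point (x INTEGER, y INTEGER)")
    cur.executemany("INSERT INTO point (x, y) VALUES (?, ?)",
                    [(1, 2), (3, 4), (5, 6)])
    conn.commit()

    cur.execute("SELECT x, y FROM point WHERE x > ? ORDER BY x", (1,))
    print([d[0] for d in cur.description])  # column names: ['x', 'y']
    print(cur.fetchone())                   # first matching row: (3, 4)
    print(cur.fetchall())                   # remaining rows: [(5, 6)]
    conn.close()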
.setinputsizes(sizes)
This can be used before a call to .execute*() to predefine memory areas for the operation's parameters.

sizes is specified as a sequence — one item for each input parameter. The item should be a Type Object that corresponds to the input that will be used, or it should be an integer specifying the maximum length of a string parameter. If the item is None, then no predefined memory area will be reserved for that column (this is useful to avoid predefined areas for large inputs).

This method would be used before the .execute*() method is invoked.

Implementations are free to have this method do nothing and users are free to not use it.

.setoutputsize(size [, column])
Set a column buffer size for fetches of large columns (e.g. LONGs, BLOBs, etc.). The column is specified as an index into the result sequence. Not specifying the column will set the default size for all large columns in the cursor.

This method would be used before the .execute*() method is invoked.

Implementations are free to have this method do nothing and users are free to not use it.

Type Objects and Constructors

Many databases need to have the input in a particular format for binding to an operation's input parameters. For example, if an input is destined for a DATE column, then it must be bound to the database in a particular string format. Similar problems exist for "Row ID" columns or large binary items (e.g. blobs or RAW columns). This presents problems for Python since the parameters to the .execute*() method are untyped. When the database module sees a Python string object, it doesn't know if it should be bound as a simple CHAR column, as a raw BINARY item, or as a DATE.

To overcome this problem, a module must provide the constructors defined below to create objects that can hold special values. When passed to the cursor methods, the module can then detect the proper type of the input parameter and bind it accordingly.

A Cursor Object's description attribute returns information about each of the result columns of a query. The type_code must compare equal to one of the Type Objects defined below. Type Objects may be equal to more than one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns; see the Implementation Hints below for details).

The module exports the following constructors and singletons:

Date(year, month, day)
This function constructs an object holding a date value.

Time(hour, minute, second)
This function constructs an object holding a time value.

Timestamp(year, month, day, hour, minute, second)
This function constructs an object holding a time stamp value.

DateFromTicks(ticks)
This function constructs an object holding a date value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).

TimeFromTicks(ticks)
This function constructs an object holding a time value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).

TimestampFromTicks(ticks)
This function constructs an object holding a time stamp value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).

Binary(string)
This function constructs an object capable of holding a binary (long) string value.

STRING type
This type object is used to describe columns in a database that are string-based (e.g. CHAR).
BINARY type
This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs).

NUMBER type
This type object is used to describe numeric columns in a database.

DATETIME type
This type object is used to describe date/time columns in a database.

ROWID type
This type object is used to describe the "Row ID" column in a database.

SQL NULL values are represented by the Python None singleton on input and output.

Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover.

Implementation Hints for Module Authors

- Date/time objects can be implemented as Python datetime module objects (available since Python 2.3, with a C API since 2.4) or using the mxDateTime package (available for all Python versions since 1.5.2). They both provide all necessary constructors and methods at Python and C level.

- Here is a sample implementation of the Unix ticks based constructors for date/time delegating work to the generic constructors:

    import time

    def DateFromTicks(ticks):
        return Date(*time.localtime(ticks)[:3])

    def TimeFromTicks(ticks):
        return Time(*time.localtime(ticks)[3:6])

    def TimestampFromTicks(ticks):
        return Timestamp(*time.localtime(ticks)[:6])

- The preferred object type for Binary objects is the buffer type available in standard Python starting with version 1.5.2. Please see the Python documentation for details. For information about the C interface have a look at Include/bufferobject.h and Objects/bufferobject.c in the Python source distribution.

- This Python class allows implementing the above type objects even though the description type code field yields multiple values for one type object:

    class DBAPITypeObject:
        def __init__(self, *values):
            self.values = values
        def __cmp__(self, other):
            if other in self.values:
                return 0
            if other < self.values:
                return 1
            else:
                return -1

  The resulting type object compares equal to all values passed to the constructor. (Note that __cmp__ only exists in Python 2; Python 3 implementations typically define __eq__ instead.)

- Here is a snippet of Python code that implements the exception hierarchy defined above[16]:

    class Error(Exception):
        pass

    class Warning(Exception):
        pass

    class InterfaceError(Error):
        pass

    class DatabaseError(Error):
        pass

    class InternalError(DatabaseError):
        pass

    class OperationalError(DatabaseError):
        pass

    class ProgrammingError(DatabaseError):
        pass

    class IntegrityError(DatabaseError):
        pass

    class DataError(DatabaseError):
        pass

    class NotSupportedError(DatabaseError):
        pass

  In C you can use the PyErr_NewException(fullname, base, NULL) API to create the exception objects.

Optional DB API Extensions

During the lifetime of DB API 2.0, module authors have often extended their implementations beyond what is required by this DB API specification. To enhance compatibility and to provide a clean upgrade path to possible future versions of the specification, this section defines a set of common extensions to the core DB API 2.0 specification.

As with all DB API optional features, the database module authors are free to not implement these additional attributes and methods (using them will then result in an AttributeError) or to raise a NotSupportedError in case the availability can only be checked at run-time.

It has been proposed to make usage of these extensions optionally visible to the programmer by issuing Python warnings through the Python warning framework. To make this feature useful, the warning messages must be standardized in order to be able to mask them. These standard messages are referred to below as Warning Message.
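A minimal sketch of how a module might emit these standardized messages through the Python warning framework (the helper and warning-category names here are invented; only the message text comes from this specification):

    import warnings

    class DBAPIExtensionWarning(Warning):
        """Hypothetical category so the warnings can be filtered as a group."""

    def _warn_extension(name):
        # Emit the standardized Warning Message for an extension attribute.
        warnings.warn("DB-API extension %s used" % name,
                      DBAPIExtensionWarning, stacklevel=3)

    # Users can then mask all such messages:
    warnings.filterwarnings("ignore", message="DB-API extension .*")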
Cursor.rownumber
This read-only attribute should provide the current 0-based index of the cursor in the result set or None if the index cannot be determined.

The index can be seen as index of the cursor in a sequence (the result set). The next fetch operation will fetch the row indexed by .rownumber in that sequence.

Warning Message: "DB-API extension cursor.rownumber used"

Connection.Error, Connection.ProgrammingError, etc.
All exception classes defined by the DB API standard should be exposed on the Connection objects as attributes (in addition to being available at module scope). These attributes simplify error handling in multi-connection environments.

Warning Message: "DB-API extension connection.<exception> used"

Cursor.connection
This read-only attribute returns a reference to the Connection object on which the cursor was created. The attribute simplifies writing polymorphic code in multi-connection environments.

Warning Message: "DB-API extension cursor.connection used"

Cursor.scroll(value [, mode='relative' ])
Scroll the cursor in the result set to a new position according to mode. If mode is relative (default), value is taken as an offset to the current position in the result set; if set to absolute, value states an absolute target position.

An IndexError should be raised in case a scroll operation would leave the result set. In this case, the cursor position is left undefined (ideal would be to not move the cursor at all).

Note: This method should use native scrollable cursors, if available, or revert to an emulation for forward-only scrollable cursors. The method may raise NotSupportedError to signal that a specific operation is not supported by the database (e.g. backward scrolling).

Warning Message: "DB-API extension cursor.scroll() used"

Cursor.messages
This is a Python list object to which the interface appends tuples (exception class, exception value) for all messages which the interface receives from the underlying database for this cursor.

The list is cleared automatically by all standard cursor method calls (prior to executing the call), except for the .fetch*() calls, to avoid excessive memory usage. It can also be cleared by executing del cursor.messages[:].

All error and warning messages generated by the database are placed into this list, so checking the list allows the user to verify correct operation of the method calls.

The aim of this attribute is to eliminate the need for a Warning exception which often causes problems (some warnings really only have informational character).

Warning Message: "DB-API extension cursor.messages used"

Connection.messages
Same as Cursor.messages except that the messages in the list are connection oriented.

The list is cleared automatically by all standard connection method calls (prior to executing the call) to avoid excessive memory usage. It can also be cleared by executing del connection.messages[:].

Warning Message: "DB-API extension connection.messages used"

Cursor.next()
Return the next row from the currently executing SQL statement using the same semantics as .fetchone(). A StopIteration exception is raised when the result set is exhausted for Python versions 2.2 and later. Previous versions don't have the StopIteration exception and so the method should raise an IndexError instead.

Warning Message: "DB-API extension cursor.next() used"

Cursor.__iter__()
Return self to make cursors compatible with the iteration protocol[17].
Warning Message: "DB-API extension cursor.__iter__() used" Cursor.lastrowid This read-only attribute provides the rowid of the last modified row (most databases return a rowid only when a single INSERT operation is performed). If the operation does not set a rowid or if the database does not support rowids, this attribute should be set to None. The semantics of .lastrowid are undefined in case the last executed statement modified more than one row, e.g. when using INSERT with .executemany(). Warning Message: "DB-API extension cursor.lastrowid used" Connection.autocommit Attribute to query and set the autocommit mode of the connection. Return True if the connection is operating in autocommit (non-transactional) mode. Return False if the connection is operating in manual commit (transactional) mode. Setting the attribute to True or False adjusts the connection's mode accordingly. Changing the setting from True to False (disabling autocommit) will have the database leave autocommit mode and start a new transaction. Changing from False to True (enabling autocommit) has database dependent semantics with respect to how pending transactions are handled.[18] Deprecation notice: Even though several database modules implement both the read and write nature of this attribute, setting the autocommit mode by writing to the attribute is deprecated, since this may result in I/O and related exceptions, making it difficult to implement in an async context.[19] Warning Message: "DB-API extension connection.autocommit used" Optional Error Handling Extensions The core DB API specification only introduces a set of exceptions which can be raised to report errors to the user. In some cases, exceptions may be too disruptive for the flow of a program or even render execution impossible. For these cases and in order to simplify error handling when dealing with databases, database module authors may choose to implement user definable error handlers. This section describes a standard way of defining these error handlers. Connection.errorhandler, Cursor.errorhandler Read/write attribute which references an error handler to call in case an error condition is met. The handler must be a Python callable taking the following arguments: errorhandler(connection, cursor, errorclass, errorvalue) where connection is a reference to the connection on which the cursor operates, cursor a reference to the cursor (or None in case the error does not apply to a cursor), errorclass is an error class which to instantiate using errorvalue as construction argument. The standard error handler should add the error information to the appropriate .messages attribute (Connection.messages or Cursor.messages) and raise the exception defined by the given errorclass and errorvalue parameters. If no .errorhandler is set (the attribute is None), the standard error handling scheme as outlined above, should be applied. Warning Message: "DB-API extension .errorhandler used" Cursors should inherit the .errorhandler setting from their connection objects at cursor creation time. Optional Two-Phase Commit Extensions Many databases have support for two-phase commit (TPC) which allows managing transactions across multiple database connections and other resources. If a database backend provides support for two-phase commit and the database module author wishes to expose this support, the following API should be implemented. NotSupportedError should be raised, if the database backend support for two-phase commit can only be checked at run-time. 
TPC Transaction IDs

As many databases follow the XA specification, transaction IDs are formed from three components:

- a format ID
- a global transaction ID
- a branch qualifier

For a particular global transaction, the first two components should be the same for all resources. Each resource in the global transaction should be assigned a different branch qualifier.

The various components must satisfy the following criteria:

- format ID: a non-negative 32-bit integer.
- global transaction ID and branch qualifier: byte strings no longer than 64 characters.

Transaction IDs are created with the .xid() Connection method:

.xid(format_id, global_transaction_id, branch_qualifier)
Returns a transaction ID object suitable for passing to the .tpc_*() methods of this connection. If the database connection does not support TPC, a NotSupportedError is raised.

The type of the object returned by .xid() is not defined, but it must provide sequence behaviour, allowing access to the three components. A conforming database module could choose to represent transaction IDs with tuples rather than a custom object.

TPC Connection Methods

.tpc_begin(xid)
Begins a TPC transaction with the given transaction ID xid.

This method should be called outside of a transaction (i.e. nothing may have executed since the last .commit() or .rollback()).

Furthermore, it is an error to call .commit() or .rollback() within the TPC transaction. A ProgrammingError is raised if the application calls .commit() or .rollback() during an active TPC transaction.

If the database connection does not support TPC, a NotSupportedError is raised.

.tpc_prepare()
Performs the first phase of a transaction started with .tpc_begin(). A ProgrammingError should be raised if this method is called outside of a TPC transaction.

After calling .tpc_prepare(), no statements can be executed until .tpc_commit() or .tpc_rollback() have been called.

.tpc_commit([ xid ])
When called with no arguments, .tpc_commit() commits a TPC transaction previously prepared with .tpc_prepare().

If .tpc_commit() is called prior to .tpc_prepare(), a single phase commit is performed. A transaction manager may choose to do this if only a single resource is participating in the global transaction.

When called with a transaction ID xid, the database commits the given transaction. If an invalid transaction ID is provided, a ProgrammingError will be raised. This form should be called outside of a transaction, and is intended for use in recovery.

On return, the TPC transaction is ended.

.tpc_rollback([ xid ])
When called with no arguments, .tpc_rollback() rolls back a TPC transaction. It may be called before or after .tpc_prepare().

When called with a transaction ID xid, it rolls back the given transaction. If an invalid transaction ID is provided, a ProgrammingError is raised. This form should be called outside of a transaction, and is intended for use in recovery.

On return, the TPC transaction is ended.

.tpc_recover()
Returns a list of pending transaction IDs suitable for use with .tpc_commit(xid) or .tpc_rollback(xid).

If the database does not support transaction recovery, it may return an empty list or raise NotSupportedError.

Frequently Asked Questions

The database SIG often sees recurring questions about the DB API specification. This section covers some of the issues people sometimes have with the specification.

Question: How can I construct a dictionary out of the tuples returned by .fetch*()?

Answer: There are several existing tools available which provide helpers for this task.
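For illustration, a tiny helper of the kind such tools provide (the function name is invented), built on the .description-based approach explained next:

    def fetchall_dicts(cursor):
        # Map each row to {column name: value} using cursor.description.
        names = [d[0] for d in cursor.description]
        return [dict(zip(names, row)) for row in cursor.fetchall()]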
Most of them use the approach of using the column names defined in the cursor attribute .description as the basis for the keys in the row dictionary.

Note that the reason for not extending the DB API specification to also support dictionary return values for the .fetch*() methods is that this approach has several drawbacks:

- Some databases don't support case-sensitive column names or auto-convert them to all lowercase or all uppercase characters.
- Columns in the result set which are generated by the query (e.g. using SQL functions) don't map to table column names and databases usually generate names for these columns in a very database specific way.

As a result, accessing the columns through dictionary keys varies between databases and makes writing portable code impossible.

Major Changes from Version 1.0 to Version 2.0

The Python Database API 2.0 introduces a few major changes compared to the 1.0 version. Because some of these changes will cause existing DB API 1.0 based scripts to break, the major version number was adjusted to reflect this change.

These are the most important changes from 1.0 to 2.0:

- The need for a separate dbi module was dropped and the functionality merged into the module interface itself.
- New constructors and Type Objects were added for date/time values, and the RAW Type Object was renamed to BINARY. The resulting set should cover all basic data types commonly found in modern SQL databases.
- New constants (apilevel, threadsafety, paramstyle) and methods (.executemany(), .nextset()) were added to provide better database bindings.
- The semantics of .callproc() needed to call stored procedures are now clearly defined.
- The definition of the .execute() return value changed. Previously, the return value was based on the SQL statement type (which was hard to implement right) — it is undefined now; use the more flexible .rowcount attribute instead. Modules are free to return the old style return values, but these are no longer mandated by the specification and should be considered database interface dependent.
- Class based exceptions were incorporated into the specification. Module implementors are free to extend the exception layout defined in this specification by subclassing the defined exception classes.

Post-publishing additions to the DB API 2.0 specification:

- Additional optional DB API extensions to the set of core functionality were specified.

Open Issues

Although the version 2.0 specification clarifies a lot of questions that were left open in the 1.0 version, there are still some remaining issues which should be addressed in future versions:

- Define a useful return value for .nextset() for the case where a new result set is available.
- Integrate the decimal module Decimal object for use as loss-less monetary and decimal interchange format.

Footnotes

Acknowledgements

Many thanks go to Andrew Kuchling who converted the Python Database API Specification 2.0 from the original HTML format into the PEP format in 2001.

Many thanks to James Henstridge for leading the discussion which led to the standardization of the two-phase commit API extensions in 2008.

Many thanks to Daniele Varrazzo for converting the specification from text PEP format to ReST PEP format, which allows linking to various parts, in 2012.

Copyright

This document has been placed in the Public Domain.
[1] As a guideline the connection constructor parameters should be implemented as keyword parameters for more intuitive use and follow this order of parameters:

    Parameter   Meaning
    ---------   --------------------------------
    dsn         Data source name as string
    user        User name as string (optional)
    password    Password as string (optional)
    host        Hostname (optional)
    database    Database name (optional)

E.g. a connect could look like this:

    connect(dsn='myhost:MYDB', user='guido', password='234$')

Also see regarding planned future additions to this list.

[2] Module implementors should prefer numeric, named or pyformat over the other formats because these offer more clarity and flexibility.

[3] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[4] In a future revision of the DB-API, the base class for Warning will likely change to the builtin Warning class. At the time of writing of the DB-API 2.0 in 1999, the warning framework in Python did not yet exist.

[5] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[6] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.

[7] In a future revision of the DB-API, the base class for Warning will likely change to the builtin Warning class. At the time of writing of the DB-API 2.0 in 1999, the warning framework in Python did not yet exist.

[8] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used. The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function. For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.

[9] A database interface may choose to support named cursors by allowing a string argument to the method. This feature is not part of the specification, since it complicates semantics of the .fetch*() methods.
[10] The term number of affected rows generally refers to the number of rows deleted, updated or inserted by the last statement run on the database cursor. Most databases will return the total number of rows that were found by the corresponding WHERE clause of the statement. Some databases use a different interpretation for UPDATEs and only return the number of rows that were changed by the UPDATE, even though the WHERE clause of the statement may have found more matching rows. Database module authors should try to implement the more common interpretation of returning the total number of rows found by the WHERE clause, or clearly document a different interpretation of the .rowcount attribute.

[11] The rowcount attribute may be coded in a way that updates its value dynamically. This can be useful for databases that return usable rowcount values only after the first call to a .fetch*() method.

[12] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used. The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function. For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.

[13] The module will use the __getitem__ method of the parameters object to map either positions (integers) or names (strings) to parameter values. This allows for both sequences and mappings to be used as input. The term bound refers to the process of binding an input value to a database execution buffer. In practical terms, this means that the input value is directly used as a value in the operation. The client should not be required to "escape" the value so that it can be used — the value should be equal to the actual database value.

[14] Note that the interface may implement row fetching using arrays and other optimizations. It is not guaranteed that a call to this method will only move the associated cursor forward by one row.

[15] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used. The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function. For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.

[16] In Python 2 and earlier versions of this PEP, StandardError was used as the base class for all DB-API exceptions. Since StandardError was removed in Python 3, database modules targeting Python 3 should use Exception as base class instead. The PEP was updated to use Exception throughout the text, to avoid confusion. The change should not affect existing modules or uses of those modules, since all DB-API error exception classes are still rooted at the Error or Warning classes.
[17] Implementation Note: Python C extensions will have to implement the tp_iter slot on the cursor object instead of the .__iter__() method.

[18] Many database modules implementing the autocommit attribute will automatically commit any pending transaction and then enter autocommit mode. It is generally recommended to explicitly .commit() or .rollback() transactions prior to changing the autocommit setting, since this is portable across database modules.

[19] In a future revision of the DB-API, we are going to introduce a new method .setautocommit(value), which will allow setting the autocommit mode, and make .autocommit a read-only attribute. Additionally, we are considering adding a new standard keyword parameter autocommit to the Connection constructor. Module authors are encouraged to add these changes in preparation for this change.
PEP: 628
Title: Add math.tau
Version: $Revision$
Last-Modified: $Date$
Author: Alyssa Coghlan <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 28-Jun-2011
Python-Version: 3.6
Post-History: 28-Jun-2011

Abstract

In honour of Tau Day 2011, this PEP proposes the addition of the circle constant math.tau to the Python standard library.

The concept of tau (τ) is based on the observation that the ratio of a circle's circumference to its radius is far more fundamental and interesting than the ratio between its circumference and diameter. It is simply a matter of assigning a name to the value 2 * pi (2π).

PEP Acceptance

This PEP is now accepted and math.tau will be a part of Python 3.6. Happy birthday Alyssa!

The idea in this PEP has been implemented in the auspiciously named issue 12345.

The Rationale for Tau

pi is defined as the ratio of a circle's circumference to its diameter. However, a circle is defined by its centre point and its radius. This is shown clearly when we note that the parameter of integration to go from a circle's circumference to its area is the radius, not the diameter. If we use the diameter instead we have to divide by four to get rid of the extraneous multiplier.

When working with radians, it is trivial to convert any given fraction of a circle to a value in radians in terms of tau. A quarter circle is tau/4, a half circle is tau/2, seven 25ths is 7*tau/25, etc. In contrast with the equivalent expressions in terms of pi (pi/2, pi, 14*pi/25), the unnecessary and needlessly confusing multiplication by two is gone.

Other Resources

I've barely skimmed the surface of the many examples put forward to point out just how much easier and more sensible many aspects of mathematics become when conceived in terms of tau rather than pi. If you don't find my specific examples sufficiently persuasive, here are some more resources that may be of interest:

- Michael Hartl is the primary instigator of Tau Day in his Tau Manifesto
- Bob Palais, the author of the original mathematics journal article highlighting the problems with pi, has a page of resources on the topic
- For those that prefer videos to written text, Pi is wrong! and Pi is (still) wrong are available on YouTube

Copyright

This document has been placed in the public domain.
PEP: 660
Title: Editable installs for pyproject.toml based builds (wheel based)
Author: Daniel Holth <[email protected]>, Stéphane Bidoul <[email protected]>
Sponsor: Paul Moore <[email protected]>
Discussions-To: https://discuss.python.org/t/draft-pep-editable-installs-for-pep-517-style-build-backends/8510
Status: Final
Type: Standards Track
Topic: Packaging
Content-Type: text/x-rst
Created: 30-Mar-2021
Post-History:
Resolution: https://discuss.python.org/t/pronouncement-on-peps-660-and-662-editable-installs/9450

Abstract

This document describes a PEP 517 style method for the installation of packages in editable mode.

Motivation

Python programmers want to be able to develop packages without having to install (i.e. copy) them into site-packages, for example, by working in a checkout of the source repository.

While this can be done by adding the relevant source directories to PYTHONPATH, setuptools provides the setup.py develop mechanism that makes the process easier, and also installs dependencies and entry points such as console scripts. pip exposes this mechanism via its pip install --editable option.

The installation of projects in such a way that the python code being imported remains in the source directory is known as the editable installation mode.

Now that PEP 517 provides a mechanism to create alternatives to setuptools, and decouple installation front ends from build backends, we need a new mechanism to install packages in editable mode.

Rationale

PEP 517 deferred "Editable installs", meaning non-setup.py distributions lacked that feature. The only way to retain editable installs for these distributions was to provide a compatible setup.py develop implementation. By defining an editable hook, other build frontends gain parity with setup.py.

Terminology and goals

The editable installation mode implies that the source code of the project being installed is available in a local directory.

Once the project is installed in editable mode, users expect that changes to the project python code in the local source tree become effective without the need of a new installation step.

Some kinds of changes, such as the addition or modification of entry points, or the addition of new dependencies, require a new installation step to become effective. These changes are typically made in build backend configuration files (such as pyproject.toml), so it is consistent with the general user expectation that python source code is imported from the source tree.

The modification of non-python source code such as C extension modules obviously requires a compilation and/or installation step to become effective. The exact steps to perform will remain specific to the build backend used.

When a project is installed in editable mode, users expect the installation to behave identically to a regular installation. In particular the code must be importable by other code, and metadata must be available to standard mechanisms such as importlib.metadata.

Depending on the way build backends implement this specification, some minor differences may be visible, such as the presence of additional files that are in the source tree and would not be part of a regular install. Build backends are encouraged to document such potential differences.

The Mechanism

This PEP adds three optional hooks to the PEP 517 backend interface. These hooks are used to build a wheel that, when installed, allows that distribution to be imported from its source folder.
build_editable

    def build_editable(wheel_directory, config_settings=None, metadata_directory=None):
        ...

Must build a .whl file, and place it in the specified wheel_directory. It must return the basename (not the full path) of the .whl file it creates, as a unicode string.

May do an in-place build of the distribution as a side effect so that any extension modules or other built artifacts are ready to be used.

The .whl file must comply with the Wheel binary file format specification (PEP 427). In particular it must contain a compliant .dist-info directory. Metadata must be identical to what would have been produced by build_wheel or prepare_metadata_for_build_wheel, except for Requires-Dist which may differ slightly as explained below.

Build backends must produce wheels that have the same dependencies (Requires-Dist metadata) as wheels produced by the build_wheel hook, with the exception that they can add dependencies necessary for their editable mechanism to function at runtime (such as editables).

The filename for the "editable" wheel needs to be PEP 427 compliant too. It does not need to use the same tags as build_wheel but it must be tagged as compatible with the system.

If the build frontend has previously called prepare_metadata_for_build_editable and depends on the wheel resulting from this call to have metadata matching this earlier call, then it should provide the path to the created .dist-info directory as the metadata_directory argument. If this argument is provided, then build_editable MUST produce a wheel with identical metadata. The directory passed in by the build frontend MUST be identical to the directory created by prepare_metadata_for_build_editable, including any unrecognized files it created.

An "editable" wheel uses the wheel format not for distribution but as ephemeral communication between the build system and the front end. This avoids having the build backend install anything directly. This wheel must not be exposed to end users, nor cached, nor distributed.

get_requires_for_build_editable

    def get_requires_for_build_editable(config_settings=None):
        ...

This hook MUST return an additional list of strings containing PEP 508 dependency specifications, above and beyond those specified in the pyproject.toml file, to be installed when calling the build_editable hooks.

If not defined, the default implementation is equivalent to return [].

prepare_metadata_for_build_editable

    def prepare_metadata_for_build_editable(metadata_directory, config_settings=None):
        ...

Must create a .dist-info directory containing wheel metadata inside the specified metadata_directory (i.e., creates a directory like {metadata_directory}/{package}-{version}.dist-info/).

This directory MUST be a valid .dist-info directory as defined in the wheel specification, except that it need not contain RECORD or signatures. The hook MAY also create other files inside this directory, and a build frontend MUST preserve, but otherwise ignore, such files; the intention here is that in cases where the metadata depends on build-time decisions, the build backend may need to record these decisions in some convenient format for re-use by the actual wheel-building step.

This must return the basename (not the full path) of the .dist-info directory it creates, as a unicode string.

If a build frontend needs this information and the method is not defined, it should call build_editable and look at the resulting metadata directly.
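To make the hook concrete, here is a hedged sketch of a trivial backend's build_editable using the .pth technique discussed in the next section. PROJECT and VERSION are placeholders, the metadata is reduced to a bare minimum, and a real backend would also have to write a proper RECORD; none of the details below are mandated by this PEP:

    import os, zipfile

    PROJECT, VERSION = "pkg", "1.0"   # placeholders

    def build_editable(wheel_directory, config_settings=None,
                       metadata_directory=None):
        name = "{0}-{1}-py3-none-any.whl".format(PROJECT, VERSION)
        dist_info = "{0}-{1}.dist-info".format(PROJECT, VERSION)
        with zipfile.ZipFile(os.path.join(wheel_directory, name), "w") as whl:
            # A .pth file that adds the source tree to sys.path on install.
            whl.writestr("__editable__.{0}.pth".format(PROJECT),
                         os.getcwd() + "\n")
            whl.writestr(dist_info + "/METADATA",
                         "Metadata-Version: 2.1\nName: {0}\nVersion: {1}\n"
                         .format(PROJECT, VERSION))
            whl.writestr(dist_info + "/WHEEL",
                         "Wheel-Version: 1.0\nGenerator: sketch 0.1\n"
                         "Root-Is-Purelib: true\nTag: py3-none-any\n")
            whl.writestr(dist_info + "/RECORD", "")  # left empty in this sketch
        return name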
What to put in the wheel

Build backends must populate the generated wheel with files that when installed will result in an editable install. Build backends may use different techniques to achieve the goals of an editable install. This section provides examples and is not normative.

- Build backends may choose to place a .pth file at the root of the .whl file, containing the root directory of the source tree. This approach is simple but not very precise, although it may be considered good enough (especially when using the src layout) and is similar to what setup.py develop currently does.

- The editables library shows how to build proxy modules that provide a high quality editable installation. It accepts a list of modules to include, and hide. When imported, these proxy modules replace themselves with the code from the source tree. Path-based methods make all scripts under a path importable, often including the project's own setup.py and other scripts that would not be part of a normal installation. The proxy strategy can achieve a higher level of fidelity than path-based methods.

- Symbolic links are another useful mechanism to realize editable installs. Since, at the time of this writing, the wheel specification does not support symbolic links, they are not directly usable to set up symbolic links in the target environment. It is however possible for the backend to create a symlink structure in some build directory of the source tree, and add that directory to the python path via a .pth file in the "editable" wheel. If some files linked in this manner depend on python implementation or version, ABI or platform, care must be taken to generate the link structure in different directories depending on compatibility tags, so the same project tree can be installed in editable mode in multiple environments.

Frontend requirements

Frontends must install "editable" wheels in the same way as regular wheels. This also means uninstallation of editables does not require any special treatment.

Frontends must create a direct_url.json file in the .dist-info directory of the installed distribution, in compliance with PEP 610. The url value must be a file:// url pointing to the project directory (i.e. the directory containing pyproject.toml), and the dir_info value must be {'editable': true}.

Frontends must execute get_requires_for_build_editable hooks in an environment which contains the bootstrap requirements specified in the pyproject.toml file.

Frontends must execute the prepare_metadata_for_build_editable and build_editable hooks in an environment which contains the bootstrap requirements from pyproject.toml and those specified by the get_requires_for_build_editable hook.

Frontends must not expose the wheel obtained from build_editable to end users. The wheel must be discarded after installation and must not be cached nor distributed.

Limitations

With regard to the wheel .data directory, this PEP focuses on making the purelib and platlib categories (installed into site-packages) "editable". It does not make special provision for the other categories such as headers, data and scripts. Package authors are encouraged to use console_scripts, make their scripts tiny wrappers around library functionality, or manage these from the source checkout during development.

Prototypes

At the time of writing this PEP, several prototype implementations are available in various frontends and backends. We provide links below to illustrate possible approaches.
Frontends:

- pip (pull request)

Build backends:

- enscons (pull request 1, pull request 2)
- flit (pull request)
- hatchling (sdist)
- pdm (pull request)
- setuptools (setuptools_pep660 repository)

Rejected ideas

editable local version identifier

The idea of having build backends append or modify the local version identifier to include the editable string has been rejected because it would not satisfy == version specifiers that include the local version identifier. In other words pkg==1.0+local is not satisfied by version 1.0+local.editable.

Virtual wheel

Another approach was proposed in PEP 662, where the build backend returns a mapping from source files and directories to the installed layout. It is then up to the installer frontend to realize the editable installation by whatever means it deems adequate for its users.

In terms of capabilities, both proposals provide the core "editable" feature. The key difference is that PEP 662 leaves it to the frontend to decide how the editable installation will be realized, while with this PEP, the choice must be made by the backend.

Both approaches can in principle provide several editable installation methods for a given project, and let the developer choose one at install time.

At the time of writing this PEP, it is clear that the community has a wide range of theoretical and practical expectations about editable installs. The reality is that the only one there is wide experience with is path insertion via .pth (i.e. what setup.py develop does).

We believe that PEP 660 better addresses these "unknown unknowns" today in the most reliable way, by letting project authors select the backend or implement the method that provides the editable mechanism that best suits their requirements, and test it works correctly. Since the frontend has no latitude in how to install the "editable" wheel, in case of issue, there is only one place to investigate: the build backend.

With PEP 662, issues need to be investigated in the frontend, the backend and possibly the specification. There is also a high probability that different frontends, implementing the specification in different ways, will produce installations that behave differently than project authors intended, creating confusion, or worse, projects that only work with specific frontends or IDEs.

Unpacked wheel

A prototype was made that created an unpacked wheel in a temporary directory, to be copied to the target environment by the frontend. This approach was not pursued because a wheel archive is easy to create for the backend, and using a wheel as communication mechanism is a better fit with the PEP 517 philosophy, and therefore keeps things simpler for the frontend.

References

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:
PEP: 3113
Title: Removal of Tuple Parameter Unpacking
Version: $Revision$
Last-Modified: $Date$
Author: Brett Cannon <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 02-Mar-2007
Python-Version: 3.0
Post-History:

Abstract

Tuple parameter unpacking is the use of a tuple as a parameter in a function signature so as to have a sequence argument automatically unpacked. An example is:

    def fxn(a, (b, c), d):
        pass

The use of (b, c) in the signature requires that the second argument to the function be a sequence of length two (e.g., [42, -13]). When such a sequence is passed it is unpacked and has its values assigned to the parameters, just as if the statement b, c = [42, -13] had been executed in the parameter.

Unfortunately this feature of Python's rich function signature abilities, while handy in some situations, causes more issues than it is worth. Thus this PEP proposes their removal from the language in Python 3.0.

Why They Should Go

Introspection Issues

Python has very powerful introspection capabilities. These extend to function signatures. There are no hidden details as to what a function's call signature is. In general it is fairly easy to figure out various details about a function's signature by viewing the function object and various attributes on it (including the function's func_code attribute).

But there is great difficulty when it comes to tuple parameters. The existence of a tuple parameter is denoted by its name being made of a . and a number in the co_varnames attribute of the function's code object. This allows the tuple argument to be bound to a name that only the bytecode is aware of and cannot be typed in Python source. But this does not specify the format of the tuple: its length, whether there are nested tuples, etc.

In order to get all of the details about the tuple from the function one must analyse the bytecode of the function. This is because the first bytecode in the function literally translates into the tuple argument being unpacked. Assuming the tuple parameter is named .1 and is expected to unpack to variables spam and monty (meaning it is the tuple (spam, monty)), the first bytecode in the function will be for the statement spam, monty = .1. This means that to know all of the details of the tuple parameter one must look at the initial bytecode of the function to detect tuple unpacking for parameters formatted as \.\d+ and deduce any and all information about the expected argument.

Bytecode analysis is how the inspect.getargspec function is able to provide information on tuple parameters. This is not easy to do and is burdensome on introspection tools as they must know how Python bytecode works (an otherwise unneeded burden as all other types of parameters do not require knowledge of Python bytecode).

The difficulty of analysing bytecode notwithstanding, there is another issue with the dependency on using Python bytecode. IronPython[1] does not use Python's bytecode. Because it is based on the .NET framework it instead stores MSIL[2] in the func_code.co_code attribute of the function. This fact prevents the inspect.getargspec function from working when run under IronPython. It is unknown whether other Python implementations are affected, but it is reasonable to assume so if the implementation is not just a re-implementation of the Python virtual machine.
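For a concrete picture of the hidden name described above (Python 2 only; tuple parameters are a syntax error in Python 3, and the exact outputs shown in the comments are what CPython 2.x is expected to produce, so treat them as illustrative):

    def fxn(a, (b, c), d):
        pass

    print fxn.func_code.co_varnames    # ('a', '.1', 'd', 'b', 'c')

    import inspect
    print inspect.getargspec(fxn)[0]   # ['a', ['b', 'c'], 'd'], recovered via bytecode analysis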
No Loss of Abilities If Removed

As mentioned in Introspection Issues, to handle tuple parameters the function's bytecode starts with the bytecode required to unpack the argument into the proper parameter names. This means that there is no special support required to implement tuple parameters and thus there is no loss of abilities if they were to be removed, only a possible convenience (which is addressed in Why They Should (Supposedly) Stay).

The example function at the beginning of this PEP could easily be rewritten as:

    def fxn(a, b_c, d):
        b, c = b_c
        pass

and in no way lose functionality.

Exception To The Rule

When looking at the various types of parameters that a Python function can have, one will notice that tuple parameters tend to be an exception rather than the rule.

Consider PEP 3102 (keyword-only arguments) and PEP 3107 (function annotations). Both PEPs have been accepted and introduce new functionality within a function's signature. And yet for both PEPs the new feature cannot be applied to tuple parameters as a whole. PEP 3102 has no support for tuple parameters at all (which makes sense as there is no way to reference a tuple parameter by name). PEP 3107 allows annotations for each item within the tuple (e.g., (x:int, y:int)), but not the whole tuple (e.g., (x, y):int).

The existence of tuple parameters also places sequence objects separately from mapping objects in a function signature. There is no way to pass in a mapping object (e.g., a dict) as a parameter and have it unpack in the same fashion as a sequence does into a tuple parameter.

Uninformative Error Messages

Consider the following function:

    def fxn((a, b), (c, d)):
        pass

If called as fxn(1, (2, 3)) one is given the error message TypeError: unpack non-sequence. This error message in no way tells you which tuple was not unpacked properly. There is also no indication that this was a result that occurred because of the arguments. Other error messages regarding arguments to functions explicitly state its relation to the signature: TypeError: fxn() takes exactly 2 arguments (0 given), etc.

Little Usage

While an informal poll of the handful of Python programmers I know personally and from the PyCon 2007 sprint indicates a huge majority of people do not know of this feature and the rest just do not use it, some hard numbers are needed to back up the claim that the feature is not heavily used.

Iterating over every line in Python's code repository in the Lib/ directory using the regular expression ^\s*def\s*\w+\s*\( to detect function and method definitions there were 22,252 matches in the trunk. Tacking on .*,\s*\( to find def statements that contained a tuple parameter, only 41 matches were found. This means that for def statements, only 0.18% of them seem to use a tuple parameter.

Why They Should (Supposedly) Stay

Practical Use

In certain instances tuple parameters can be useful. A common example is code that expects a two-item tuple that represents a Cartesian point. While true it is nice to be able to have the unpacking of the x and y coordinates done for you, the argument is that this small amount of practical usefulness is heavily outweighed by other issues pertaining to tuple parameters. And as shown in No Loss Of Abilities If Removed, their use is purely practical and in no way provides a unique ability that cannot be handled in other ways very easily.
Self-Documentation For Parameters

It has been argued that tuple parameters provide a way of self-documentation for parameters that are expected to be of a certain sequence format. Using our Cartesian point example from Practical Use, seeing (x, y) as a parameter in a function makes it obvious that a tuple of length two is expected as an argument for that parameter.

But Python provides several other ways to document what parameters are for. Documentation strings are meant to provide enough information needed to explain what arguments are expected. A tuple parameter might tell you the expected length of a sequence argument, but it does not tell you what that data will be used for. One must also read the docstring to know what other arguments are expected if not all parameters are tuple parameters.

Function annotations (which do not work with tuple parameters) can also supply documentation. Because annotations can be of any form, what was once a tuple parameter can be a single argument parameter with an annotation of tuple, tuple(2), Cartesian point, (x, y), etc. Annotations provide great flexibility for documenting what an argument is expected to be for a parameter, including being a sequence of a certain length.

Transition Plan

To transition Python 2.x code to 3.x where tuple parameters are removed, two steps are suggested. First, the proper warning is to be emitted when Python's compiler comes across a tuple parameter in Python 2.6. This will be treated like any other syntactic change that is to occur in Python 3.0 compared to Python 2.6.

Second, the 2to3 refactoring tool[3] will gain a fixer[4] for translating tuple parameters to being a single parameter that is unpacked as the first statement in the function. The name of the new parameter will be changed. The new parameter will then be unpacked into the names originally used in the tuple parameter. This means that the following function:

    def fxn((a, (b, c))):
        pass

will be translated into:

    def fxn(a_b_c):
        (a, (b, c)) = a_b_c
        pass

As tuple parameters are used by lambdas because of the single expression limitation, they must also be supported. This is done by having the expected sequence argument bound to a single parameter and then indexing on that parameter:

    lambda (x, y): x + y

will be translated into:

    lambda x_y: x_y[0] + x_y[1]

References

Copyright

This document has been placed in the public domain.

Local Variables: mode: indented-text indent-tabs-mode: nil sentence-end-double-space: t fill-column: 70 coding: utf-8 End:

[1] IronPython (http://www.codeplex.com/Wiki/View.aspx?ProjectName=IronPython)

[2] Microsoft Intermediate Language (http://msdn.microsoft.com/library/en-us/cpguide/html/cpconmicrosoftintermediatelanguagemsil.asp?frame=true)

[3] 2to3 refactoring tool (http://svn.python.org/view/sandbox/trunk/2to3/)

[4] 2to3 fixer (http://svn.python.org/view/sandbox/trunk/2to3/fixes/fix_tuple_params.py)
PEP: 332
Title: Byte vectors and String/Unicode Unification
Version: $Revision$
Last-Modified: $Date$
Author: Skip Montanaro <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 11-Aug-2004
Python-Version: 2.5
Post-History:

Abstract

This PEP outlines the introduction of a raw bytes sequence object and the unification of the current str and unicode objects.

Rejection Notice

This PEP is rejected in this form. The author has expressed lack of time to continue to shepherd it, and discussion on python-dev has moved to a slightly different proposal which will (eventually) be written up as a new PEP. See the thread starting at https://mail.python.org/pipermail/python-dev/2006-February/060930.html.

Rationale

Python's current string objects are overloaded. They serve both to hold ASCII and non-ASCII character data and to also hold sequences of raw bytes which have no reasonable interpretation as displayable character sequences. This overlap hasn't been a big problem in the past, but as Python moves closer to requiring source code to be properly encoded, the use of strings to represent raw byte sequences will become more problematic. In addition, as Python's Unicode support has improved, it's easier to consider strings as ASCII-encoded Unicode objects.

Proposed Implementation

The number in parentheses indicates the Python version in which the feature will be introduced.

- Add a bytes builtin which is just a synonym for str. (2.5)
- Add a b"..." string literal which is equivalent to raw string literals, with the exception that values which conflict with the source encoding of the containing file do not generate warnings. (2.5)
- Warn about the use of variables named "bytes". (2.5 or 2.6)
- Introduce a bytes builtin which refers to a sequence distinct from the str type. (2.6)
- Make str a synonym for unicode. (3.0)

Bytes Object API

TBD.

Issues

- Can this be accomplished before Python 3.0?
- Should bytes objects be mutable or immutable? (Guido seems to like them to be mutable.)

Copyright

This document has been placed in the public domain.
PEP: 500
Title: A protocol for delegating datetime methods to their tzinfo implementations
Version: $Revision$
Last-Modified: $Date$
Author: Alexander Belopolsky <[email protected]>, Tim Peters <[email protected]>
Discussions-To: [email protected]
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Requires: 495
Created: 08-Aug-2015
Resolution: https://mail.python.org/pipermail/datetime-sig/2015-August/000354.html

Abstract

This PEP specifies a new protocol (PDDM - "A Protocol for Delegating Datetime Methods") that can be used by concrete implementations of the datetime.tzinfo interface to override aware datetime arithmetics, formatting and parsing. We describe changes to the datetime.datetime class to support the new protocol and propose a new abstract class datetime.tzstrict that implements the parts of this protocol necessary to make aware datetime instances follow "strict" arithmetic rules.

Rationale

As of Python 3.5, aware datetime instances that share a tzinfo object follow the rules of arithmetics that are induced by a simple bijection between (year, month, day, hour, minute, second, microsecond) 7-tuples and large integers. In this arithmetics, the difference between YEAR-11-02T12:00 and YEAR-11-01T12:00 is always 24 hours, even though in the US/Eastern timezone, for example, there are 25 hours between 2014-11-01T12:00 and 2014-11-02T12:00 because the local clocks were rolled back one hour at 2014-11-02T02:00, introducing an extra hour in the night between 2014-11-01 and 2014-11-02.

Many business applications require the use of Python's simplified view of local dates. No self-respecting car rental company will charge its customers more for a week that straddles the end of DST than for any other week, or require that they return the car an hour early. Therefore, changing the current rules for aware datetime arithmetics will not only create a backward compatibility nightmare, it will eliminate support for legitimate and common use cases.

Since it is impossible to choose universal rules for local time arithmetics, we propose to delegate the implementation of those rules to the classes that implement the datetime.tzinfo interface. With such delegation in place, users will be able to choose between different arithmetics by simply picking instances of different classes for the value of tzinfo.

Protocol

Subtraction of datetime

A tzinfo subclass supporting the PDDM may define a method called __datetime_diff__ that should take two datetime.datetime instances and return a datetime.timedelta instance representing the time elapsed from the time represented by the first datetime instance to the other.

Addition

A tzinfo subclass supporting the PDDM may define a method called __datetime_add__ that should take two arguments -- a datetime and a timedelta instance -- and return a datetime instance.

Subtraction of timedelta

A tzinfo subclass supporting the PDDM may define a method called __datetime_sub__ that should take two arguments -- a datetime and a timedelta instance -- and return a datetime instance.

Formatting

A tzinfo subclass supporting the PDDM may define methods called __datetime_isoformat__ and __datetime_strftime__.

The __datetime_isoformat__ method should take a datetime instance and an optional separator and produce a string representation of the given datetime instance.

The __datetime_strftime__ method should take a datetime instance and a format string and produce a string representation of the given datetime instance formatted according to the given format.
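As an illustration, here is a hypothetical tzinfo subclass sketching what implementing two of the PDDM hooks could look like. The class name and all behaviour are our own invention; since the PEP was rejected, datetime never grew the machinery that would actually call these methods:

    from datetime import timedelta, timezone, tzinfo

    class StrictUTCPlus2(tzinfo):
        """A fixed-offset zone that opts in to "strict" PDDM arithmetic."""

        def utcoffset(self, dt): return timedelta(hours=2)
        def dst(self, dt): return timedelta(0)
        def tzname(self, dt): return "UTC+02:00"

        def __datetime_add__(self, dt, delta):
            # Do the arithmetic in UTC, then convert back to this zone.
            return (dt.astimezone(timezone.utc) + delta).astimezone(self)

        def __datetime_diff__(self, dt1, dt2):
            # Elapsed time from dt1 to dt2, measured in UTC.
            return dt2.astimezone(timezone.utc) - dt1.astimezone(timezone.utc)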
Parsing

A tzinfo subclass supporting the PDDM may define a class method called __datetime_strptime__ and register the "canonical" names of the timezones that it implements with a registry. TODO: Describe a registry.

Changes to datetime methods

Subtraction

    class datetime:
        def __sub__(self, other):
            if isinstance(other, datetime):
                try:
                    self_diff = self.tzinfo.__datetime_diff__
                except AttributeError:
                    self_diff = None
                try:
                    other_diff = other.tzinfo.__datetime_diff__
                except AttributeError:
                    other_diff = None
                if self_diff is not None:
                    if self_diff is not other_diff and self_diff.__func__ is not other_diff.__func__:
                        raise ValueError("Cannot find difference of two datetimes with "
                                         "different tzinfo.__datetime_diff__ implementations.")
                    return self_diff(self, other)
            elif isinstance(other, timedelta):
                try:
                    sub = self.tzinfo.__datetime_sub__
                except AttributeError:
                    pass
                else:
                    return sub(self, other)
                return self + -other
            else:
                return NotImplemented  # current implementation

Addition

Addition of a timedelta to a datetime instance will be delegated to the self.tzinfo.__datetime_add__ method whenever it is defined.

Strict arithmetics

A new abstract subclass of the datetime.tzinfo class called datetime.tzstrict will be added to the datetime module. This subclass will not implement the utcoffset(), tzname() or dst() methods, but will implement some of the methods of the PDDM.

The PDDM methods implemented by tzstrict will be equivalent to the following:

    class tzstrict(tzinfo):
        def __datetime_diff__(self, dt1, dt2):
            utc_dt1 = dt1.astimezone(timezone.utc)
            utc_dt2 = dt2.astimezone(timezone.utc)
            return utc_dt2 - utc_dt1

        def __datetime_add__(self, dt, delta):
            utc_dt = dt.astimezone(timezone.utc)
            return (utc_dt + delta).astimezone(self)

        def __datetime_sub__(self, dt, delta):
            utc_dt = dt.astimezone(timezone.utc)
            return (utc_dt - delta).astimezone(self)

Parsing and formatting

The datetime methods strftime and isoformat will delegate to the namesake methods of their tzinfo members whenever those methods are defined.

When the datetime.strptime method is given a format string that contains a %Z instruction, it will look up the tzinfo implementation in the registry by the given timezone name and call its __datetime_strptime__ method.

Applications

This PEP will enable third party implementation of many different timekeeping schemes including:

- Julian / Microsoft Excel calendar.
- "Right" timezones with leap second support.
- French revolutionary calendar (with a lot of work).

Copyright

This document has been placed in the public domain.
PEP: 449
Title: Removal of the PyPI Mirror Auto Discovery and Naming Scheme
Version: $Revision$
Last-Modified: $Date$
Author: Donald Stufft <[email protected]>
BDFL-Delegate: Richard Jones <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Process
Topic: Packaging
Content-Type: text/x-rst
Created: 04-Aug-2013
Post-History: 04-Aug-2013
Replaces: 381
Resolution: https://mail.python.org/pipermail/distutils-sig/2013-August/022518.html

Abstract

This PEP provides a path to deprecate and ultimately remove the auto discovery of PyPI mirrors as well as the hard coded naming scheme which requires delegating a domain name under pypi.python.org to a third party.

Rationale

The PyPI mirroring infrastructure (defined in PEP 381) provides a means to mirror the content of PyPI used by the automatic installers. It also provides a method for auto discovery of mirrors and a consistent naming scheme.

There are a number of problems with the auto discovery protocol and the naming scheme:

- They give control over a *.python.org domain name to a third party, allowing that third party to set or read cookies on the pypi.python.org and python.org domain names.
- The use of a sub domain of pypi.python.org means that the mirror operators will never be able to get an SSL certificate of their own, and giving them one for a python.org domain name is unlikely to happen.
- The auto discovery uses an unauthenticated protocol (DNS).
- The lack of a TLS certificate on these domains means that clients can not be sure that they have not been a victim of DNS poisoning or a MITM attack.
- The auto discovery protocol was designed to enable a client to automatically select a mirror for use. This is no longer a requirement because the CDN that PyPI is now using is a globally distributed network of servers which will automatically select one close to the client without any effort on the client's part.
- The auto discovery protocol and use of the consistent naming scheme have only ever been implemented by one installer (pip), and its implementation, besides being insecure, has serious issues with performance and is slated for removal with its next release (1.5).
- While there are provisions in PEP 381 that would solve some of these issues for a dedicated client, they would not solve the issues that affect a user's browser. Additionally these provisions have not been implemented by any installer to date.

Due to the number of issues, some of them very serious, and the CDN which provides most of the benefit of the auto discovery and consistent naming scheme, this PEP proposes to first deprecate and then remove the [a..z].pypi.python.org names for mirrors and the last.pypi.python.org name for the auto discovery protocol. The ability to mirror and the method of mirroring will not be affected and will continue to exist as written in PEP 381. Operators of existing mirrors are encouraged to acquire their own domains and certificates to use for their mirrors if they wish to continue hosting them.

Plan for Deprecation & Removal

Immediately upon acceptance of this PEP, documentation on PyPI will be updated to reflect the deprecated nature of the official public mirrors and will direct users to external resources like http://www.pypi-mirrors.org/ to discover unofficial public mirrors if they wish to use one.

Mirror operators, if they wish to continue operating their mirror, should acquire a domain name to represent their mirror and, if they are able, a TLS certificate.
Once they have acquired a domain, they should redirect their assigned N.pypi.python.org domain name to their new domain. On Feb 15th, 2014 the DNS entries for [a..z].pypi.python.org and last.pypi.python.org will be removed. At any time prior to Feb 15th, 2014 a mirror operator may request that their domain name be reclaimed by PyPI and pointed back at the master.

Why Feb 15th, 2014

The most critical decision of this PEP is the final cut off date. If the date is too soon then it needlessly punishes people by forcing them to drop everything to update their deployment scripts. If the date is too far away then the extended period of time does not help with the migration effort and merely puts off the migration until a later date.

The date of Feb 15th, 2014 has been chosen because it is roughly 6 months from the date of the PEP. This should ensure a lengthy period of time to enable people to update their deployment procedures to point to the new domain names without merely padding the cut off date.

Why the DNS entries must be removed

While it would be possible to simply reclaim the domain names used by the mirrors and direct them back at PyPI in order to prevent users from needing to update configurations to point away from those domains, this has a number of issues.

- Anyone who currently has these names hard coded in their configuration has them hard coded as HTTP. This means that by allowing these names to continue resolving we make it simple for a MITM operator to attack users by rewriting the redirect to HTTPS prior to giving it to the client.
- The overhead of maintaining several domains pointing at PyPI has proved troublesome for the small number of N.pypi.python.org domains that have already been reclaimed. They oftentimes get mis-configured when things change on the service, which often leaves them broken for months at a time until somebody notices. By leaving them in we leave users of these domains open to random breakages which are less likely to get caught or noticed.
- People using these domains have explicitly chosen to use them for one reason or another. One such reason may be because they do not wish to deploy from a host located in a particular country. If these domains continue to resolve but do not point at their existing locations, we have silently removed this choice from the existing users of those domains.

That being said, removing the entries will require users who have modified their configuration to either point back at the master (PyPI) or select a new mirror name to point at. This is regarded as a regrettable requirement to protect PyPI itself and the users of the mirrors from the attacks outlined above or, at the very least, to require them to make an informed decision about the insecurity.

Public or Private Mirrors

The mirroring protocol will continue to exist as defined in PEP 381 and people are encouraged to host public and private mirrors if they so desire. The recommended mirroring client is Bandersnatch.

Copyright

This document has been placed in the public domain.
PEP: 597
Title: Add optional EncodingWarning
Last-Modified: 07-Aug-2021
Author: Inada Naoki <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Jun-2019
Python-Version: 3.10

Abstract

Add a new warning category EncodingWarning. It is emitted when the encoding argument to open() is omitted and the default locale-specific encoding is used.

The warning is disabled by default. A new -X warn_default_encoding command-line option and a new PYTHONWARNDEFAULTENCODING environment variable can be used to enable it.

A "locale" argument value for encoding is added too. It explicitly specifies that the locale encoding should be used, silencing the warning.

Motivation

Using the default encoding is a common mistake

Developers using macOS or Linux may forget that the default encoding is not always UTF-8.

For example, using long_description = open("README.md").read() in setup.py is a common mistake. Many Windows users cannot install such packages if there is at least one non-ASCII character (e.g. emoji, author names, copyright symbols, and the like) in their UTF-8-encoded README.md file.

Of the 4000 most downloaded packages from PyPI, 489 use non-ASCII characters in their README, and 82 fail to install from source on non-UTF-8 locales due to not specifying an encoding for a non-ASCII file.[1]

Another example is logging.basicConfig(filename="log.txt"). Some users might expect it to use UTF-8 by default, but the locale encoding is actually what is used.[2]

Even Python experts may assume that the default encoding is UTF-8. This creates bugs that only happen on Windows; see [3], [4], [5], and [6] for example.

Emitting a warning when the encoding argument is omitted will help find such mistakes.

Explicit way to use locale-specific encoding

open(filename) isn't explicit about which encoding is expected:

- If ASCII is assumed, this isn't a bug, but may result in decreased performance on Windows, particularly with non-Latin-1 locale encodings
- If UTF-8 is assumed, this may be a bug or a platform-specific script
- If the locale encoding is assumed, the behavior is as expected (but could change if future versions of Python modify the default)

From this point of view, open(filename) is not readable code.

encoding=locale.getpreferredencoding(False) can be used to specify the locale encoding explicitly, but it is too long and easy to misuse (e.g. one can forget to pass False as its argument).

This PEP provides an explicit way to specify the locale encoding.

Prepare to change the default encoding to UTF-8

Since UTF-8 has become the de-facto standard text encoding, we might default to it for opening files in the future.

However, such a change will affect many applications and libraries. If we start emitting DeprecationWarning everywhere the encoding argument is omitted, it will be too noisy and painful.

Although this PEP doesn't propose changing the default encoding, it will help enable that change by:

- Reducing the number of omitted encoding arguments in libraries before we start emitting a DeprecationWarning by default.
- Allowing users to pass encoding="locale" to suppress the current warning and any DeprecationWarning added in the future, as well as retaining consistent behavior if later Python versions change the default, ensuring support for any Python version >=3.10.

Specification

EncodingWarning

Add a new EncodingWarning warning class as a subclass of Warning. It is emitted when the encoding argument is omitted and the default locale-specific encoding is used.
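As a concrete illustration (our own, not from the PEP), the following two-line script triggers the warning on CPython 3.10+ when run with the option described in the Abstract; the file name is arbitrary:

    # demo.py -- run as:  python -X warn_default_encoding demo.py
    # Omitting encoding= below makes CPython 3.10+ emit
    # "EncodingWarning: 'encoding' argument not specified".
    with open("README.md") as f:
        f.read()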
Options to enable the warning

The -X warn_default_encoding option and the PYTHONWARNDEFAULTENCODING environment variable are added. They are used to enable EncodingWarning.

sys.flags.warn_default_encoding is also added. The flag is true when EncodingWarning is enabled.

When the flag is set, io.TextIOWrapper(), open() and other modules using them will emit EncodingWarning when the encoding argument is omitted.

Since EncodingWarning is a subclass of Warning, they are shown by default (if the warn_default_encoding flag is set), unlike DeprecationWarning.

encoding="locale"

io.TextIOWrapper will accept "locale" as a valid argument to encoding. It has the same meaning as the current encoding=None, except that io.TextIOWrapper doesn't emit EncodingWarning when encoding="locale" is specified.

io.text_encoding()

io.text_encoding() is a helper for functions with an encoding=None parameter that pass it to io.TextIOWrapper() or open().

A pure Python implementation will look like this:

    def text_encoding(encoding, stacklevel=1):
        """A helper function to choose the text encoding.

        When *encoding* is not None, just return it.
        Otherwise, return the default text encoding (i.e. "locale").

        This function emits an EncodingWarning if *encoding* is None and
        sys.flags.warn_default_encoding is true.

        This function can be used in APIs with an encoding=None parameter
        that pass it to TextIOWrapper or open.
        However, please consider using encoding="utf-8" for new APIs.
        """
        if encoding is None:
            if sys.flags.warn_default_encoding:
                import warnings
                warnings.warn(
                    "'encoding' argument not specified.",
                    EncodingWarning, stacklevel + 2)
            encoding = "locale"
        return encoding

For example, pathlib.Path.read_text() can use it like this:

    def read_text(self, encoding=None, errors=None):
        encoding = io.text_encoding(encoding)
        with self.open(mode='r', encoding=encoding, errors=errors) as f:
            return f.read()

By using io.text_encoding(), EncodingWarning is emitted for the caller of read_text() instead of read_text() itself.

Affected standard library modules

Many standard library modules will be affected by this change.

Most APIs accepting encoding=None will use io.text_encoding() as written in the previous section.

Where using the locale encoding as the default encoding is reasonable, encoding="locale" will be used instead. For example, the subprocess module will use the locale encoding as the default for pipes.

Many tests use open() without encoding specified to read ASCII text files. They should be rewritten with encoding="ascii".

Rationale

Opt-in warning

Although DeprecationWarning is suppressed by default, always emitting DeprecationWarning when the encoding argument is omitted would be too noisy. Noisy warnings may lead developers to dismiss the DeprecationWarning.

"locale" is not a codec alias

We don't add "locale" as a codec alias because the locale can be changed at runtime.

Additionally, TextIOWrapper checks os.device_encoding() when encoding=None. This behavior cannot be implemented in a codec.

Backward Compatibility

The new warning is not emitted by default, so this PEP is 100% backwards-compatible.

Forward Compatibility

Passing "locale" as the argument to encoding is not forward-compatible. Code using it will not work on Python older than 3.10, and will instead raise LookupError: unknown encoding: locale.

Until developers can drop Python 3.9 support, EncodingWarning can only be used for finding missing encoding="utf-8" arguments.
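Until that support can be dropped, code that wants the explicit spelling has to branch on the running version. A minimal sketch (the LOCALE_ENCODING name and the file name are our own):

    import sys

    # "locale" is only a valid encoding argument on Python 3.10+; older
    # versions raise LookupError for it, so fall back to None (the
    # implicit locale default) there.
    LOCALE_ENCODING = "locale" if sys.version_info >= (3, 10) else None

    with open("log.txt", encoding=LOCALE_ENCODING) as f:
        f.read()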
How to Teach This

For new users

Since EncodingWarning is used to write cross-platform code, there is no need to teach it to new users.

We can just recommend using UTF-8 for text files and using encoding="utf-8" when opening them.

For experienced users

Using open(filename) to read text files encoded in UTF-8 is a common mistake. It may not work on Windows because UTF-8 is not the default encoding.

You can use -X warn_default_encoding or PYTHONWARNDEFAULTENCODING=1 to find this type of mistake.

Omitting the encoding argument is not a bug when opening text files encoded in the locale encoding, but encoding="locale" is recommended in Python 3.10 and later because it is more explicit.

Reference Implementation

https://github.com/python/cpython/pull/19481

Discussions

The latest discussion thread is: https://mail.python.org/archives/list/[email protected]/thread/SFYUP2TWD5JZ5KDLVSTZ44GWKVY4YNCV/

- Why not implement this in linters?
  - encoding="locale" and io.text_encoding() must be implemented in Python.
  - It is difficult to find all callers of functions wrapping open() or TextIOWrapper() (see the io.text_encoding() section).
- Many developers will not use the option.
  - Some will, and report the warnings to libraries they use, so the option is worth it even if many developers don't enable it.
  - For example, I found [7] and [8] by running pip install -U pip, and [9] by running tox with the reference implementation. This demonstrates how this option can be used to find potential issues.

References

[1] "Packages can't be installed when encoding is not UTF-8" (https://github.com/methane/pep597-pypi-ascii)
[2] "Logging - Inconsistent behaviour when handling unicode" (https://bugs.python.org/issue37111)
[3] Packaging tutorial in packaging.python.org didn't specify encoding to read a README.md (https://github.com/pypa/packaging.python.org/pull/682)
[4] json.tool had used locale encoding to read JSON files. (https://bugs.python.org/issue33684)
[5] site: Potential UnicodeDecodeError when handling pth file (https://bugs.python.org/issue33684)
[6] pypa/pip: "Installing packages fails if Python 3 installed into path with non-ASCII characters" (https://github.com/pypa/pip/issues/9054)
[7] "site: Potential UnicodeDecodeError when handling pth file" (https://bugs.python.org/issue43214)
[8] "[pypa/pip] Use encoding option or binary mode for open()" (https://github.com/pypa/pip/pull/9608)
[9] "Possible UnicodeError caused by missing encoding="utf-8"" (https://github.com/tox-dev/tox/issues/1908)

Copyright

This document is placed in the public domain or under the CC0-1.0-Universal license, whichever is more permissive.
PEP: 548
Title: More Flexible Loop Control
Version: $Revision$
Last-Modified: $Date$
Author: R David Murray
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 05-Sep-2017
Python-Version: 3.7
Post-History: 05-Aug-2017

Rejection Note

Rejection by Guido: https://mail.python.org/pipermail/python-dev/2017-September/149232.html

Abstract

This PEP proposes enhancing the break and continue statements with an optional boolean expression that controls whether or not they execute. This allows the flow of control in loops to be expressed more clearly and compactly.

Motivation

Quoting from the rejected PEP 315:

    It is often necessary for some code to be executed before each
    evaluation of the while loop condition. This code is often
    duplicated outside the loop, as setup code that executes once
    before entering the loop:

        <setup code>
        while <condition>:
            <loop body>
            <setup code>

That PEP was rejected because no syntax was found that was superior to the following form:

    while True:
        <setup code>
        if not <condition>:
            break
        <loop body>

This PEP proposes a superior form, one that also has application to for loops. It is superior because it makes the flow of control in loops more explicit, while preserving Python's indentation aesthetic.

Syntax

The syntax of the break and continue statements are extended as follows:

    break_stmt : "break" ["if" expression]
    continue_stmt : "continue" ["if" expression]

In addition, the syntax of the while statement is modified as follows:

    while_stmt : while1_stmt | while2_stmt
    while1_stmt : "while" expression ":" suite ["else" ":" suite]
    while2_stmt : "while" ":" suite

Semantics

A break if or continue if is executed if and only if expression evaluates to true.

A while statement with no expression loops until a break or return is executed (or an error is raised), as if it were a while True statement. Given that the loop can never terminate except in a way that would not cause an else suite to execute, no else suite is allowed in the expressionless form. If practical, it should also be an error if the body of an expressionless while does not contain at least one break or return statement.

Justification and Examples

The previous "best possible" form:

    while True:
        <setup code>
        if not <condition>:
            break
        <loop body>

could be formatted as:

    while True:
        <setup code>
        if not <condition>: break
        <loop body>

This is superficially almost identical to the form proposed by this PEP:

    while:
        <setup code>
        break if not <condition>
        <loop body>

The significant difference here is that the loop flow control keyword appears first in the line of code. This makes it easier to comprehend the flow of control in the loop at a glance, especially when reading colorized code.

For example, this is a common code pattern, taken in this case from the tarfile module:

    while True:
        buf = self._read(self.bufsize)
        if not buf:
            break
        t.append(buf)

Reading this, we either see the break and possibly need to think about where the while is that it applies to, since the break is indented under the if, and then track backward to read the condition that triggers it; or, we read the condition and only afterward discover that this condition changes the flow of the loop.

With the new syntax this becomes:

    while:
        buf = self._read(self.bufsize)
        break if not buf
        t.append(buf)

Reading this we first see the break, which obviously applies to the while since it is at the same level of indentation as the loop body, and then we read the condition that causes the flow of control to change.
Further, consider a more complex example from sre_parse:

    while True:
        c = self.next
        self.__next()
        if c is None:
            if not result:
                raise self.error("missing group name")
            raise self.error("missing %s, unterminated name" % terminator,
                             len(result))
        if c == terminator:
            if not result:
                raise self.error("missing group name", 1)
            break
        result += c
    return result

This is the natural way to write this code given current Python loop control syntax. However, given break if, it would be more natural to write this as follows:

    while:
        c = self.next
        self.__next()
        break if c is None or c == terminator
        result += c
    if not result:
        raise self.error("missing group name")
    elif c is None:
        raise self.error("missing %s, unterminated name" % terminator,
                         len(result))
    return result

This form moves the error handling out of the loop body, leaving the loop logic much more understandable. While it would certainly be possible to write the code this way using the current syntax, the proposed syntax makes it more natural to write it in the clearer form.

The proposed syntax also provides a natural, Pythonic spelling of the classic repeat ... until <expression> construct found in other languages, and for which no good syntax has previously been found for Python:

    while:
        ...
        break if <expression>

The tarfile module, for example, has a couple of "read until" loops like the following:

    while True:
        s = self.__read(1)
        if not s or s == NUL:
            break

With the new syntax this would read more clearly:

    while:
        s = self.__read(1)
        break if not s or s == NUL

The case for extending this syntax to continue is less strong, but buttressed by the value of consistency.

It is much more common for a continue statement to be at the end of a multiline if suite, such as this example from zipfile:

    while True:
        try:
            self.fp = io.open(file, filemode)
        except OSError:
            if filemode in modeDict:
                filemode = modeDict[filemode]
                continue
            raise
        break

The only opportunity for improvement the new syntax would offer for this loop would be the omission of the True token.

On the other hand, consider this example from uuid.py:

    for i in range(adapters.length):
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        bytes = status.adapter_address[:6]
        if len(bytes) != 6:
            continue
        return int.from_bytes(bytes, 'big')

This becomes:

    for i in range(adapters.length):
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        continue if win32wnet.Netbios(ncb) != 0
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        continue if win32wnet.Netbios(ncb) != 0
        status._unpack()
        bytes = status.adapter_address[:6]
        continue if len(bytes) != 6
        return int.from_bytes(bytes, 'big')

This example indicates that there are non-trivial use cases where continue if also improves the readability of the loop code.

It is probably significant to note that all of the examples selected for this PEP were found by grepping the standard library for while True and continue, and the relevant examples were found in the first four modules inspected.

Copyright

This document is placed in the public domain.
PEP: 455
Title: Adding a key-transforming dictionary to collections
Version: $Revision$
Last-Modified: $Date$
Author: Antoine Pitrou <[email protected]>
BDFL-Delegate: Raymond Hettinger
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 13-Sep-2013
Python-Version: 3.5
Post-History:

Abstract

This PEP proposes a new data structure for the collections module, called "TransformDict" in this PEP. This structure is a mutable mapping which transforms the key using a given function when doing a lookup, but retains the original key when reading.

Rejection

See the rationale at https://mail.python.org/pipermail/python-dev/2015-May/140003.html and, for an earlier partial review, see https://mail.python.org/pipermail/python-dev/2013-October/129937.html .

Rationale

Numerous specialized versions of this pattern exist. The most common is a case-insensitive case-preserving dict, i.e. a dict-like container which matches keys in a case-insensitive fashion but retains the original casing. It is a very common need in network programming, as many protocols feature some arrays of "key / value" properties in their messages, where the keys are textual strings whose case is specified to be ignored on receipt but by either specification or custom is to be preserved or non-trivially canonicalized when retransmitted.

Another common request is an identity dict, where keys are matched according to their respective id()s instead of normal matching.

Both are instances of a more general pattern, where a given transformation function is applied to keys when looking them up: that function being str.lower or str.casefold in the former example and the built-in id function in the latter.

(It could be said that the pattern projects keys from the user-visible set onto the internal lookup set.)

Semantics

TransformDict is a MutableMapping implementation: it faithfully implements the well-known API of mutable mappings, like dict itself and other dict-like classes in the standard library. Therefore, this PEP won't rehash the semantics of most TransformDict methods.

The transformation function needn't be bijective; it can be strictly surjective as in the case-insensitive example (in other words, different keys can look up the same value):

    >>> d = TransformDict(str.casefold)
    >>> d['SomeKey'] = 5
    >>> d['somekey']
    5
    >>> d['SOMEKEY']
    5

TransformDict retains the first key used when creating an entry:

    >>> d = TransformDict(str.casefold)
    >>> d['SomeKey'] = 1
    >>> d['somekey'] = 2
    >>> list(d.items())
    [('SomeKey', 2)]

The original keys needn't be hashable, as long as the transformation function returns a hashable one:

    >>> d = TransformDict(id)
    >>> l = [None]
    >>> d[l] = 5
    >>> l in d
    True

Constructor

As shown in the examples above, creating a TransformDict requires passing the key transformation function as the first argument (much like creating a defaultdict requires passing the factory function as first argument).

The constructor also takes other optional arguments which can be used to initialize the TransformDict with certain key-value pairs.
Those optional arguments are the same as in the dict and defaultdict constructors:

    >>> d = TransformDict(str.casefold, [('Foo', 1)], Bar=2)
    >>> sorted(d.items())
    [('Bar', 2), ('Foo', 1)]

Getting the original key

TransformDict also features a lookup method returning the stored key together with the corresponding value:

    >>> d = TransformDict(str.casefold, {'Foo': 1})
    >>> d.getitem('FOO')
    ('Foo', 1)
    >>> d.getitem('bar')
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    KeyError: 'bar'

The method name getitem() follows the standard popitem() method on mutable mappings.

Getting the transformation function

TransformDict has a simple read-only property transform_func which gives back the transformation function.

Alternative proposals and questions

Retaining the last original key

Most python-dev respondents found retaining the first user-supplied key more intuitive than retaining the last. Also, it matches the dict object's own behaviour when using different but equal keys:

    >>> d = {}
    >>> d[1] = 'hello'
    >>> d[1.0] = 'world'
    >>> d
    {1: 'world'}

Furthermore, explicitly retaining the last key in a first-key-retaining scheme is still possible using the following approach:

    d.pop(key, None)
    d[key] = value

while the converse (retaining the first key in a last-key-retaining scheme) doesn't look possible without rewriting part of the container's code.

Using an encoder / decoder pair

Using a function pair isn't necessary, since the original key is retained by the container. Moreover, an encoder / decoder pair would require the transformation to be bijective, which prevents important use cases like case-insensitive matching.

Providing a transformation function for values

Dictionary values are not used for lookup; their semantics are totally irrelevant to the container's operation. Therefore, there is no point in having both an "original" and a "transformed" value: the transformed value wouldn't be used for anything.

Providing a specialized container, not generic

It was asked why we would provide the generic TransformDict construct rather than a specialized case-insensitive dict variant. The answer is that it's nearly as cheap (code-wise and performance-wise) to provide the generic construct, and it can fill more use cases.

Even case-insensitive dicts can actually elicit different transformation functions: str.lower, str.casefold or in some cases bytes.lower when working with text encoded in an ASCII-compatible encoding.

Other constructor patterns

Two other constructor patterns were proposed by Serhiy Storchaka:

- A type factory scheme:

      d = TransformDict(str.casefold)(Foo=1)

- A subclassing scheme:

      class CaseInsensitiveDict(TransformDict):
          __transform__ = str.casefold

      d = CaseInsensitiveDict(Foo=1)

While both approaches can be defended, they don't follow established practices in the standard library, and therefore were rejected.

Implementation

A patch for the collections module is tracked on the bug tracker at http://bugs.python.org/issue18986.
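For readers who want to experiment with the semantics described above without applying the patch, here is a minimal pure-Python sketch of the proposed behaviour (our own approximation, not the reference implementation from the tracker issue; for instance, the KeyError it raises carries the transformed key rather than the original one):

    from collections.abc import MutableMapping

    class TransformDict(MutableMapping):
        """Minimal sketch: maps transform(key) -> (original key, value)."""

        def __init__(self, transform_func, *args, **kwargs):
            self._transform = transform_func
            self._data = {}
            self.update(*args, **kwargs)

        @property
        def transform_func(self):
            return self._transform

        def __getitem__(self, key):
            return self._data[self._transform(key)][1]

        def __setitem__(self, key, value):
            k = self._transform(key)
            # Retain the first original key used for this slot.
            original = self._data[k][0] if k in self._data else key
            self._data[k] = (original, value)

        def __delitem__(self, key):
            del self._data[self._transform(key)]

        def __iter__(self):
            return (original for original, value in self._data.values())

        def __len__(self):
            return len(self._data)

        def getitem(self, key):
            """Return the stored (original_key, value) pair."""
            return self._data[self._transform(key)]

With this sketch, TransformDict(str.casefold, Foo=1).getitem('FOO') returns ('Foo', 1), matching the sessions shown above.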
Existing work

Case-insensitive dicts are a popular request:

- http://twistedmatrix.com/documents/current/api/twisted.python.util.InsensitiveDict.html
- https://mail.python.org/pipermail/python-list/2013-May/647243.html
- https://mail.python.org/pipermail/python-list/2005-April/296208.html
- https://mail.python.org/pipermail/python-list/2004-June/241748.html
- http://bugs.python.org/msg197376
- http://stackoverflow.com/a/2082169
- http://stackoverflow.com/a/3296782
- http://code.activestate.com/recipes/66315-case-insensitive-dictionary/
- https://gist.github.com/babakness/3901174
- http://www.wikier.org/blog/key-insensitive-dictionary-in-python
- http://en.sharejs.com/python/14534
- http://www.voidspace.org.uk/python/archive.shtml#caseless

Identity dicts have been requested too:

- https://mail.python.org/pipermail/python-ideas/2010-May/007235.html
- http://www.gossamer-threads.com/lists/python/python/209527

Several modules in the standard library use identity lookups for object memoization, for example pickle, json, copy, cProfile, doctest and _threading_local.

Other languages

C# / .Net

.Net has a generic Dictionary class where you can specify a custom IEqualityComparer: http://msdn.microsoft.com/en-us/library/xfhwa508.aspx

Using it is the recommended way to write case-insensitive dictionaries: http://stackoverflow.com/questions/13230414/case-insensitive-access-for-generic-dictionary

Java

Java has a specialized CaseInsensitiveMap: http://commons.apache.org/proper/commons-collections/apidocs/org/apache/commons/collections4/map/CaseInsensitiveMap.html

It also has a separate IdentityHashMap: http://docs.oracle.com/javase/6/docs/api/java/util/IdentityHashMap.html

C++

The C++ Standard Template Library features an unordered_map with customizable hash and equality functions: http://www.cplusplus.com/reference/unordered_map/unordered_map/

Copyright

This document has been placed in the public domain.
PEP: 377
Title: Allow __enter__() methods to skip the statement body
Version: $Revision$
Last-Modified: $Date$
Author: Alyssa Coghlan <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 08-Mar-2009
Python-Version: 2.7, 3.1
Post-History: 08-Mar-2009

Abstract

This PEP proposes a backwards compatible mechanism that allows __enter__() methods to skip the body of the associated with statement. The lack of this ability currently means the contextlib.contextmanager decorator is unable to fulfil its specification of being able to turn arbitrary code into a context manager by moving it into a generator function with a yield in the appropriate location. One symptom of this is that contextlib.nested will currently raise RuntimeError in situations where writing out the corresponding nested with statements would not [1].

The proposed change is to introduce a new flow control exception SkipStatement, and skip the execution of the with statement body if __enter__() raises this exception.

PEP Rejection

This PEP was rejected by Guido [2] as it imposes too great an increase in complexity without a proportional increase in expressiveness and correctness. In the absence of compelling use cases that need the more complex semantics proposed by this PEP the existing behaviour is considered acceptable.

Proposed Change

The semantics of the with statement will be changed to include a new try/except/else block around the call to __enter__(). If SkipStatement is raised by the __enter__() method, then the main section of the with statement (now located in the else clause) will not be executed. To avoid leaving the names in any as clause unbound in this case, a new StatementSkipped singleton (similar to the existing NotImplemented singleton) will be assigned to all names that appear in the as clause.

The components of the with statement remain as described in PEP 343:

    with EXPR as VAR:
        BLOCK

After the modification, the with statement semantics would be as follows:

    mgr = (EXPR)
    exit = mgr.__exit__  # Not calling it yet
    try:
        value = mgr.__enter__()
    except SkipStatement:
        VAR = StatementSkipped
        # Only if "as VAR" is present and
        # VAR is a single name
        # If VAR is a tuple of names, then StatementSkipped
        # will be assigned to each name in the tuple
    else:
        exc = True
        try:
            try:
                VAR = value  # Only if "as VAR" is present
                BLOCK
            except:
                # The exceptional case is handled here
                exc = False
                if not exit(*sys.exc_info()):
                    raise
                # The exception is swallowed if exit() returns true
        finally:
            # The normal and non-local-goto cases are handled here
            if exc:
                exit(None, None, None)

With the above change in place for the with statement semantics, contextlib.contextmanager() will then be modified to raise SkipStatement instead of RuntimeError when the underlying generator doesn't yield.

Rationale for Change

Currently, some apparently innocuous context managers may raise RuntimeError when executed. This occurs when the context manager's __enter__() method encounters a situation where the written out version of the code corresponding to the context manager would skip the code that is now the body of the with statement. Since the __enter__() method has no mechanism available to signal this to the interpreter, it is instead forced to raise an exception that not only skips the body of the with statement, but also jumps over all code until the nearest exception handler.
This goes against one of the design goals of the with statement, which was to be able to factor out arbitrary common exception handling code into a single context manager by putting it into a generator function and replacing the variant part of the code with a yield statement.

Specifically, the following examples behave differently if cmB().__enter__() raises an exception which cmA().__exit__() then handles and suppresses:

    with cmA():
        with cmB():
            do_stuff()
    # This will resume here without executing "do_stuff()"

    @contextlib.contextmanager
    def combined():
        with cmA():
            with cmB():
                yield

    with combined():
        do_stuff()
    # This will raise a RuntimeError complaining that the context
    # manager's underlying generator didn't yield

    with contextlib.nested(cmA(), cmB()):
        do_stuff()
    # This will raise the same RuntimeError as the contextmanager()
    # example (unsurprising, given that the nested() implementation
    # uses contextmanager())

    # The following class based version shows that the issue isn't
    # specific to contextlib.contextmanager() (it also shows how
    # much simpler it is to write context managers as generators
    # instead of as classes!)
    class CM(object):
        def __init__(self):
            self.cmA = None
            self.cmB = None

        def __enter__(self):
            if self.cmA is not None:
                raise RuntimeError("Can't re-use this CM")
            self.cmA = cmA()
            self.cmA.__enter__()
            try:
                self.cmB = cmB()
                self.cmB.__enter__()
            except:
                self.cmA.__exit__(*sys.exc_info())
                # Can't suppress in __enter__(), so must raise
                raise

        def __exit__(self, *args):
            suppress = False
            try:
                if self.cmB is not None:
                    suppress = self.cmB.__exit__(*args)
            except:
                suppress = self.cmA.__exit__(*sys.exc_info())
                if not suppress:
                    # Exception has changed, so reraise explicitly
                    raise
            else:
                if suppress:
                    # cmB already suppressed the exception,
                    # so don't pass it to cmA
                    suppress = self.cmA.__exit__(None, None, None)
                else:
                    suppress = self.cmA.__exit__(*args)
            return suppress

With the proposed semantic change in place, the contextlib based examples above would then "just work", but the class based version would need a small adjustment to take advantage of the new semantics:

    class CM(object):
        def __init__(self):
            self.cmA = None
            self.cmB = None

        def __enter__(self):
            if self.cmA is not None:
                raise RuntimeError("Can't re-use this CM")
            self.cmA = cmA()
            self.cmA.__enter__()
            try:
                self.cmB = cmB()
                self.cmB.__enter__()
            except:
                if self.cmA.__exit__(*sys.exc_info()):
                    # Suppress the exception, but don't run
                    # the body of the with statement either
                    raise SkipStatement
                raise

        def __exit__(self, *args):
            suppress = False
            try:
                if self.cmB is not None:
                    suppress = self.cmB.__exit__(*args)
            except:
                suppress = self.cmA.__exit__(*sys.exc_info())
                if not suppress:
                    # Exception has changed, so reraise explicitly
                    raise
            else:
                if suppress:
                    # cmB already suppressed the exception,
                    # so don't pass it to cmA
                    suppress = self.cmA.__exit__(None, None, None)
                else:
                    suppress = self.cmA.__exit__(*args)
            return suppress

There is currently a tentative suggestion [3] to add import-style syntax to the with statement to allow multiple context managers to be included in a single with statement without needing to use contextlib.nested. In that case the compiler has the option of simply emitting multiple with statements at the AST level, thus allowing the semantics of actual nested with statements to be reproduced accurately.
However, such a change would highlight rather than alleviate the problem the current PEP aims to address: it would not be possible to use contextlib.contextmanager to reliably factor out such with statements, as they would exhibit exactly the same semantic differences as are seen with the combined() context manager in the above example.

Performance Impact

Implementing the new semantics makes it necessary to store the references to the __enter__ and __exit__ methods in temporary variables instead of on the stack. This results in a slight regression in with statement speed relative to Python 2.6/3.1. However, implementing a custom SETUP_WITH opcode would negate any differences between the two approaches (as well as dramatically improving speed by eliminating more than a dozen unnecessary trips around the eval loop).

Reference Implementation

Patch attached to Issue 5251 [4]. That patch uses only existing opcodes (i.e. no SETUP_WITH).

Acknowledgements

James William Pye both raised the issue and suggested the basic outline of the solution described in this PEP.

References

[1] Issue 5251: contextlib.nested inconsistent with nested with statements (http://bugs.python.org/issue5251)
[2] Guido's rejection of the PEP (https://mail.python.org/pipermail/python-dev/2009-March/087263.html)
[3] Import-style syntax to reduce indentation of nested with statements (https://mail.python.org/pipermail/python-ideas/2009-March/003188.html)
[4] Issue 5251: contextlib.nested inconsistent with nested with statements (http://bugs.python.org/issue5251)

Copyright

This document has been placed in the public domain.
PEP: 349
Title: Allow str() to return unicode strings
Version: $Revision$
Last-Modified: $Date$
Author: Neil Schemenauer <[email protected]>
Status: Rejected
Type: Standards Track
Content-Type: text/x-rst
Created: 02-Aug-2005
Python-Version: 2.5
Post-History: 06-Aug-2005
Resolution: https://mail.python.org/archives/list/[email protected]/message/M2Y3PUFLAE23NPRJPVBYF6P5LW5LVN6F/

Abstract

This PEP proposes to change the str() built-in function so that it can return unicode strings. This change would make it easier to write code that works with either string type and would also make some existing code handle unicode strings. The C function PyObject_Str() would remain unchanged and the function PyString_New() would be added instead.

Rationale

Python has had a Unicode string type for some time now, but use of it is not yet widespread. There is a large amount of Python code that assumes that string data is represented as str instances. The long-term plan for Python is to phase out the str type and use unicode for all string data. Clearly, a smooth migration path must be provided.

We need to upgrade existing libraries, written for str instances, to be made capable of operating in an all-unicode string world. We can't change to an all-unicode world until all essential libraries are made capable for it. Upgrading the libraries in one shot does not seem feasible. A more realistic strategy is to individually make the libraries capable of operating on unicode strings while preserving their current all-str environment behaviour.

First, we need to be able to write code that can accept unicode instances without attempting to coerce them to str instances. Let us label such code as Unicode-safe. Unicode-safe libraries can be used in an all-unicode world.

Second, we need to be able to write code that, when provided only str instances, will not create unicode results. Let us label such code as str-stable. Libraries that are str-stable can be used by libraries and applications that are not yet Unicode-safe.

Sometimes it is simple to write code that is both str-stable and Unicode-safe. For example, the following function just works:

    def appendx(s):
        return s + 'x'

That's not too surprising since the unicode type is designed to make the task easier. The principle is that when str and unicode instances meet, the result is a unicode instance. One notable difficulty arises when code requires a string representation of an object; an operation traditionally accomplished by using the str() built-in function.

Using the current str() function makes the code not Unicode-safe. Replacing a str() call with a unicode() call makes the code not str-stable. Changing str() so that it could return unicode instances would solve this problem. As a further benefit, some code that is currently not Unicode-safe because it uses str() would become Unicode-safe.

Specification

A Python implementation of the str() built-in follows:

    def str(s):
        """Return a nice string representation of the object.

        The return value is a str or unicode instance.
        """
        if type(s) is str or type(s) is unicode:
            return s
        r = s.__str__()
        if not isinstance(r, (str, unicode)):
            raise TypeError('__str__ returned non-string')
        return r

The following function would be added to the C API and would be the equivalent to the str() built-in (ideally it would be called PyObject_Str, but changing that function could cause a massive number of compatibility problems):

    PyObject *PyString_New(PyObject *);

A reference implementation is available on Sourceforge [1] as a patch.
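To make the str-stable / Unicode-safe distinction concrete, here is a hypothetical Python 2 session using the specification's pure-Python version above, bound to the name new_str so as not to shadow the built-in (the name new_str is ours, not the PEP's):

    >>> new_str('abc')            # str in, str out: str-stable
    'abc'
    >>> new_str(u'\u20ac')        # unicode passes through: Unicode-safe
    u'\u20ac'
    >>> str(u'\u20ac')            # the current built-in coerces to str
    Traceback (most recent call last):
      ...
    UnicodeEncodeError: 'ascii' codec can't encode character u'\u20ac' in position 0: ordinal not in range(128)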
Backwards Compatibility

Some code may require that str() returns a str instance. In the standard library, only one such case has been found so far. The function email.header_decode() requires a str instance and the email.Header.decode_header() function tries to ensure this by calling str() on its argument. The code was fixed by changing the line "header = str(header)" to:

    if isinstance(header, unicode):
        header = header.encode('ascii')

Whether this is truly a bug is questionable since decode_header() really operates on byte strings, not character strings. Code that passes it a unicode instance could itself be considered buggy.

Alternative Solutions

A new built-in function could be added instead of changing str(). Doing so would introduce virtually no backwards compatibility problems. However, since the compatibility problems are expected to be rare, changing str() seems preferable to adding a new built-in.

The basestring type could be changed to have the proposed behaviour, rather than changing str(). However, that would be confusing behaviour for an abstract base type.

References

[1] https://bugs.python.org/issue1266570

Copyright

This document has been placed in the public domain.
PEP: 589
Title: TypedDict: Type Hints for Dictionaries with a Fixed Set of Keys
Author: Jukka Lehtosalo <[email protected]>
Sponsor: Guido van Rossum <[email protected]>
BDFL-Delegate: Guido van Rossum <[email protected]>
Discussions-To: [email protected]
Status: Final
Type: Standards Track
Topic: Typing
Created: 20-Mar-2019
Python-Version: 3.8
Post-History:
Resolution: https://mail.python.org/archives/list/[email protected]/message/FDO4KFYWYQEP3U2HVVBEBR3SXPHQSHYR/

Abstract

PEP 484 defines the type Dict[K, V] for uniform dictionaries, where each value has the same type, and arbitrary key values are supported. It doesn't properly support the common pattern where the type of a dictionary value depends on the string value of the key. This PEP proposes a type constructor typing.TypedDict to support the use case where a dictionary object has a specific set of string keys, each with a value of a specific type.

Here is an example where PEP 484 doesn't allow us to annotate satisfactorily:

    movie = {'name': 'Blade Runner', 'year': 1982}

This PEP proposes the addition of a new type constructor, called TypedDict, to allow the type of movie to be represented precisely:

    from typing import TypedDict

    class Movie(TypedDict):
        name: str
        year: int

Now a type checker should accept this code:

    movie: Movie = {'name': 'Blade Runner', 'year': 1982}

Motivation

Representing an object or structured data using (potentially nested) dictionaries with string keys (instead of a user-defined class) is a common pattern in Python programs. Representing JSON objects is perhaps the canonical use case, and this is popular enough that Python ships with a JSON library. This PEP proposes a way to allow such code to be type checked more effectively.

More generally, representing pure data objects using only Python primitive types such as dictionaries, strings and lists has had certain appeal. They are easy to serialize and deserialize even when not using JSON. They trivially support various useful operations with no extra effort, including pretty-printing (through str() and the pprint module), iteration, and equality comparisons.

PEP 484 doesn't properly support the use cases mentioned above. Let's consider a dictionary object that has exactly two valid string keys, 'name' with value type str, and 'year' with value type int. The PEP 484 type Dict[str, Any] would be suitable, but it is too lenient, as arbitrary string keys can be used, and arbitrary values are valid. Similarly, Dict[str, Union[str, int]] is too general, as the value for key 'name' could be an int, and arbitrary string keys are allowed. Also, the type of a subscription expression such as d['name'] (assuming d to be a dictionary of this type) would be Union[str, int], which is too wide.

Dataclasses are a more recent alternative to solve this use case, but there is still a lot of existing code that was written before dataclasses became available, especially in large existing codebases where type hinting and checking has proven to be helpful. Unlike dictionary objects, dataclasses don't directly support JSON serialization, though there is a third-party package that implements it [1].

Specification

A TypedDict type represents dictionary objects with a specific set of string keys, and with specific value types for each valid key. Each string key can be either required (it must be present) or non-required (it doesn't need to exist).

This PEP proposes two ways of defining TypedDict types. The first uses a class-based syntax.
Other proposed features include TypedDict inheritance and totality (specifying whether keys are required or not).

This PEP also provides a sketch of how a type checker is expected to support type checking operations involving TypedDict objects. Similar to PEP 484, this discussion is left somewhat vague on purpose, to allow experimentation with a wide variety of different type checking approaches. In particular, type compatibility should be based on structural compatibility: a more specific TypedDict type can be compatible with a smaller (more general) TypedDict type.

Class-based Syntax

A TypedDict type can be defined using the class definition syntax with typing.TypedDict as the sole base class:

    from typing import TypedDict

    class Movie(TypedDict):
        name: str
        year: int

Movie is a TypedDict type with two items: 'name' (with type str) and 'year' (with type int).

A type checker should validate that the body of a class-based TypedDict definition conforms to the following rules:

- The class body should only contain lines with item definitions of the form key: value_type, optionally preceded by a docstring. The syntax for item definitions is identical to attribute annotations, but there must be no initializer, and the key name actually refers to the string value of the key instead of an attribute name.

- Type comments cannot be used with the class-based syntax, for consistency with the class-based NamedTuple syntax. (Note that it would not be sufficient to support type comments for backwards compatibility with Python 2.7, since the class definition may have a total keyword argument, as discussed below, and this isn't valid syntax in Python 2.7.) Instead, this PEP provides an alternative, assignment-based syntax for backwards compatibility, discussed in Alternative Syntax.

- String literal forward references are valid in the value types.

- Methods are not allowed, since the runtime type of a TypedDict object will always be just dict (it is never a subclass of dict).

- Specifying a metaclass is not allowed.

An empty TypedDict can be created by only including pass in the body (if there is a docstring, pass can be omitted):

    class EmptyDict(TypedDict):
        pass

Using TypedDict Types

Here is an example of how the type Movie can be used:

    movie: Movie = {'name': 'Blade Runner', 'year': 1982}

An explicit Movie type annotation is generally needed, as otherwise an ordinary dictionary type could be assumed by a type checker, for backwards compatibility. When a type checker can infer that a constructed dictionary object should be a TypedDict, an explicit annotation can be omitted. A typical example is a dictionary object as a function argument. In this example, a type checker is expected to infer that the dictionary argument should be understood as a TypedDict:

    def record_movie(movie: Movie) -> None: ...

    record_movie({'name': 'Blade Runner', 'year': 1982})

Another example where a type checker should treat a dictionary display as a TypedDict is in an assignment to a variable with a previously declared TypedDict type:

    movie: Movie
    ...
    movie = {'name': 'Blade Runner', 'year': 1982}
Operations on movie can be checked by a static type checker:

    movie['director'] = 'Ridley Scott'  # Error: invalid key 'director'
    movie['year'] = '1982'  # Error: invalid value type ("int" expected)

The code below should be rejected, since 'title' is not a valid key, and the 'name' key is missing:

    movie2: Movie = {'title': 'Blade Runner', 'year': 1982}

The created TypedDict type object is not a real class object. Here are the only uses of the type a type checker is expected to allow:

- It can be used in type annotations and in any context where an arbitrary type hint is valid, such as in type aliases and as the target type of a cast.

- It can be used as a callable object with keyword arguments corresponding to the TypedDict items. Non-keyword arguments are not allowed. Example:

      m = Movie(name='Blade Runner', year=1982)

  When called, the TypedDict type object returns an ordinary dictionary object at runtime:

      print(type(m))  # <class 'dict'>

- It can be used as a base class, but only when defining a derived TypedDict. This is discussed in more detail below.

In particular, TypedDict type objects cannot be used in isinstance() tests such as isinstance(d, Movie). The reason is that there is no existing support for checking types of dictionary item values, since isinstance() does not work with many PEP 484 types, including common ones like List[str]. This would be needed for cases like this:

    class Strings(TypedDict):
        items: List[str]

    print(isinstance({'items': [1]}, Strings))    # Should be False
    print(isinstance({'items': ['x']}, Strings))  # Should be True

The above use case is not supported. This is consistent with how isinstance() is not supported for List[str].

Inheritance

It is possible for a TypedDict type to inherit from one or more TypedDict types using the class-based syntax. In this case the TypedDict base class should not be included. Example:

    class BookBasedMovie(Movie):
        based_on: str

Now BookBasedMovie has keys name, year, and based_on. It is equivalent to this definition, since TypedDict types use structural compatibility:

    class BookBasedMovie(TypedDict):
        name: str
        year: int
        based_on: str

Here is an example of multiple inheritance:

    class X(TypedDict):
        x: int

    class Y(TypedDict):
        y: str

    class XYZ(X, Y):
        z: bool

The TypedDict XYZ has three items: x (type int), y (type str), and z (type bool).

A TypedDict cannot inherit from both a TypedDict type and a non-TypedDict base class.

Additional notes on TypedDict class inheritance:

- Changing a field type of a parent TypedDict class in a subclass is not allowed. Example:

      class X(TypedDict):
          x: str

      class Y(X):
          x: int  # Type check error: cannot overwrite TypedDict field "x"

  In the example above, the TypedDict class annotations return type str for key x:

      print(Y.__annotations__)  # {'x': <class 'str'>}

- Multiple inheritance does not allow conflicting types for the same field name:

      class X(TypedDict):
          x: int

      class Y(TypedDict):
          x: str

      class XYZ(X, Y):  # Type check error: cannot overwrite TypedDict field "x" while merging
          xyz: bool

Totality

By default, all keys must be present in a TypedDict. It is possible to override this by specifying totality. Here is how to do this using the class-based syntax:

    class Movie(TypedDict, total=False):
        name: str
        year: int

This means that a Movie TypedDict can have any of the keys omitted. Thus these are valid:

    m: Movie = {}
    m2: Movie = {'year': 2015}

A type checker is only expected to support a literal False or True as the value of the total argument. True is the default, and makes all items defined in the class body required.

The totality flag only applies to items defined in the body of the TypedDict definition. Inherited items won't be affected, and instead use the totality of the TypedDict type where they were defined. This makes it possible to have a combination of required and non-required keys in a single TypedDict type, as the sketch below illustrates.
Alternative Syntax

This PEP also proposes an alternative syntax that can be backported to older Python versions such as 3.5 and 2.7 that don't support the variable definition syntax introduced in PEP 526. It resembles the traditional syntax for defining named tuples:

    Movie = TypedDict('Movie', {'name': str, 'year': int})

It is also possible to specify totality using the alternative syntax:

    Movie = TypedDict('Movie', {'name': str, 'year': int}, total=False)

The semantics are equivalent to the class-based syntax. This syntax doesn't support inheritance, however, and there is no way to have both required and non-required fields in a single type. The motivation for this is to keep the backwards compatible syntax as simple as possible while covering the most common use cases.

A type checker is only expected to accept a dictionary display expression as the second argument to TypedDict. In particular, a variable that refers to a dictionary object does not need to be supported, to simplify implementation.
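To make the last rule concrete, a small sketch (the name fields is illustrative); both definitions work at runtime, but only the first needs to be understood by type checkers:

    from typing import TypedDict

    # Accepted: the second argument is a dictionary display (a literal).
    Movie = TypedDict('Movie', {'name': str, 'year': int})

    # May be rejected by a type checker, even though it runs fine:
    # the second argument is a variable, not a dictionary display.
    fields = {'name': str, 'year': int}
    Movie2 = TypedDict('Movie2', fields)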
Type Consistency

Informally speaking, type consistency is a generalization of the is-subtype-of relation to support the Any type. It is defined more formally in PEP 483. This section introduces the new, non-trivial rules needed to support type consistency for TypedDict types.

First, any TypedDict type is consistent with Mapping[str, object]. Second, a TypedDict type A is consistent with TypedDict type B if A is structurally compatible with B. This is true if and only if both of these conditions are satisfied:

- For each key in B, A has the corresponding key and the corresponding value type in A is consistent with the value type in B. For each key in B, the value type in B is also consistent with the corresponding value type in A.

- For each required key in B, the corresponding key is required in A. For each non-required key in B, the corresponding key is not required in A.

Discussion:

- Value types behave invariantly, since TypedDict objects are mutable. This is similar to mutable container types such as List and Dict. Example where this is relevant:

      class A(TypedDict):
          x: Optional[int]

      class B(TypedDict):
          x: int

      def f(a: A) -> None:
          a['x'] = None

      b: B = {'x': 0}
      f(b)  # Type check error: 'B' not compatible with 'A'
      b['x'] + 1  # Runtime error: None + 1

- A TypedDict type with a required key is not consistent with a TypedDict type where the same key is a non-required key, since the latter allows keys to be deleted. Example where this is relevant:

      class A(TypedDict, total=False):
          x: int

      class B(TypedDict):
          x: int

      def f(a: A) -> None:
          del a['x']

      b: B = {'x': 0}
      f(b)  # Type check error: 'B' not compatible with 'A'
      b['x'] + 1  # Runtime KeyError: 'x'

- A TypedDict type A with no key 'x' is not consistent with a TypedDict type with a non-required key 'x', since at runtime the key 'x' could be present and have an incompatible type (which may not be visible through A due to structural subtyping). Example:

      class A(TypedDict, total=False):
          x: int
          y: int

      class B(TypedDict, total=False):
          x: int

      class C(TypedDict, total=False):
          x: int
          y: str

      def f(a: A) -> None:
          a['y'] = 1

      def g(b: B) -> None:
          f(b)  # Type check error: 'B' incompatible with 'A'

      c: C = {'x': 0, 'y': 'foo'}
      g(c)
      c['y'] + 'bar'  # Runtime error: int + str

- A TypedDict isn't consistent with any Dict[...] type, since dictionary types allow destructive operations, including clear(). They also allow arbitrary keys to be set, which would compromise type safety. Example:

      class A(TypedDict):
          x: int

      class B(A):
          y: str

      def f(d: Dict[str, int]) -> None:
          d['y'] = 0

      def g(a: A) -> None:
          f(a)  # Type check error: 'A' incompatible with Dict[str, int]

      b: B = {'x': 0, 'y': 'foo'}
      g(b)
      b['y'] + 'bar'  # Runtime error: int + str

- A TypedDict with all int values is not consistent with Mapping[str, int], since there may be additional non-int values not visible through the type, due to structural subtyping. These can be accessed using the values() and items() methods in Mapping, for example. Example:

      class A(TypedDict):
          x: int

      class B(TypedDict):
          x: int
          y: str

      def sum_values(m: Mapping[str, int]) -> int:
          n = 0
          for v in m.values():
              n += v  # Runtime error
          return n

      def f(a: A) -> None:
          sum_values(a)  # Error: 'A' incompatible with Mapping[str, int]

      b: B = {'x': 0, 'y': 'foo'}
      f(b)

Supported and Unsupported Operations

Type checkers should support restricted forms of most dict operations on TypedDict objects. The guiding principle is that operations not involving Any types should be rejected by type checkers if they may violate runtime type safety. Here are some of the most important type safety violations to prevent:

1. A required key is missing.

2. A value has an invalid type.

3. A key that is not defined in the TypedDict type is added.

A key that is not a literal should generally be rejected, since its value is unknown during type checking, and thus can cause some of the above violations. (Use of Final Values and Literal Types generalizes this to cover final names and literal types.)

The use of a key that is not known to exist should be reported as an error, even if this wouldn't necessarily generate a runtime type error. Such uses are often mistakes, and they may insert values with an invalid type if structural subtyping hides the types of certain items. For example, d['x'] = 1 should generate a type check error if 'x' is not a valid key for d (which is assumed to be a TypedDict type).

Extra keys included in TypedDict object construction should also be caught. In this example, the director key is not defined in Movie and is expected to generate an error from a type checker:

    m: Movie = dict(
        name='Alien',
        year=1979,
        director='Ridley Scott')  # error: Unexpected key 'director'

Type checkers should reject the following operations on TypedDict objects as unsafe, even though they are valid for normal dictionaries (a sketch of the get()/in exception follows this list):

- Operations with arbitrary str keys (instead of string literals or other expressions with known string values) should generally be rejected. This involves both destructive operations such as setting an item and read-only operations such as subscription expressions. As an exception to the above rule, d.get(e) and e in d should be allowed for TypedDict objects, for an arbitrary expression e with type str. The motivation is that these are safe and can be useful for introspecting TypedDict objects. The static type of d.get(e) should be object if the string value of e cannot be determined statically.

- clear() is not safe since it could remove required keys, some of which may not be directly visible because of structural subtyping. popitem() is similarly unsafe, even if all known keys are not required (total=False).

- del obj['key'] should be rejected unless 'key' is a non-required key.
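To illustrate the get()/in exception in the list above, here is a minimal sketch (the helper name dump_known_keys is illustrative, not part of this PEP); it runs on Python 3.8+:

    from typing import List, TypedDict

    class Movie(TypedDict):
        name: str
        year: int

    def dump_known_keys(m: Movie, keys: List[str]) -> None:
        for key in keys:            # key is an arbitrary str, not a literal
            if key in m:            # allowed for TypedDict objects
                value = m.get(key)  # allowed; static type of value is object
                print(key, value)

    dump_known_keys({'name': 'Alien', 'year': 1979}, ['name', 'director'])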
Type checkers may allow reading an item using d['x'] even if the key 'x' is not required, instead of requiring the use of d.get('x') or an explicit 'x' in d check. The rationale is that tracking the existence of keys is difficult to implement in full generality, and that disallowing this could require many changes to existing code.

The exact type checking rules are up to each type checker to decide. In some cases potentially unsafe operations may be accepted if the alternative is to generate false positive errors for idiomatic code.

Use of Final Values and Literal Types

Type checkers should allow final names (PEP 591) with string values to be used instead of string literals in operations on TypedDict objects. For example, this is valid:

    YEAR: Final = 'year'

    m: Movie = {'name': 'Alien', 'year': 1979}
    years_since_epoch = m[YEAR] - 1970

Similarly, an expression with a suitable literal type (PEP 586) can be used instead of a literal value:

    def get_value(movie: Movie,
                  key: Literal['year', 'name']) -> Union[int, str]:
        return movie[key]

Type checkers are only expected to support actual string literals, not final names or literal types, for specifying keys in a TypedDict type definition. Also, only a boolean literal can be used to specify totality in a TypedDict definition. The motivation for this is to make type declarations self-contained, and to simplify the implementation of type checkers.

Backwards Compatibility

To retain backwards compatibility, type checkers should not infer a TypedDict type unless it is sufficiently clear that this is desired by the programmer. When unsure, an ordinary dictionary type should be inferred. Otherwise existing code that type checks without errors may start generating errors once TypedDict support is added to the type checker, since TypedDict types are more restrictive than dictionary types. In particular, they aren't subtypes of dictionary types.

Reference Implementation

The mypy [2] type checker supports TypedDict types. A reference implementation of the runtime component is provided in the typing_extensions [3] module. The original implementation was in the mypy_extensions [4] module.

Rejected Alternatives

Several proposed ideas were rejected. The current set of features seems to cover a lot of ground, and it was not clear which of the proposed extensions would be more than marginally useful. This PEP defines a baseline feature that can potentially be extended later.

These are rejected on principle, as incompatible with the spirit of this proposal:

- TypedDict isn't extensible, and it addresses only a specific use case. TypedDict objects are regular dictionaries at runtime, and TypedDict cannot be used with other dictionary-like or mapping-like classes, including subclasses of dict. There is no way to add methods to TypedDict types. The motivation here is simplicity.

- TypedDict type definitions could plausibly be used to perform runtime type checking of dictionaries. For example, they could be used to validate that a JSON object conforms to the schema specified by a TypedDict type. This PEP doesn't include such functionality, since the focus of this proposal is static type checking only, and other existing types do not support this, as discussed in Class-based Syntax.
Such functionality can be provided by a third-party library, for example one based on the typing_inspect [5] module.

- TypedDict types can't be used in isinstance() or issubclass() checks. The reasoning is similar to why runtime type checks aren't supported in general with many type hints.

These features were left out from this PEP, but they are potential extensions to be added in the future:

- TypedDict doesn't support providing a default value type for keys that are not explicitly defined. This would allow arbitrary keys to be used with a TypedDict object, and only explicitly enumerated keys would receive special treatment compared to a normal, uniform dictionary type.

- There is no way to individually specify whether each key is required or not. No proposed syntax was clear enough, and we expect that there is limited need for this.

- TypedDict can't be used for specifying the type of a **kwargs argument. This would allow restricting the allowed keyword arguments and their types. According to PEP 484, using a TypedDict type as the type of **kwargs means that the TypedDict is valid as the value of arbitrary keyword arguments, but it doesn't restrict which keyword arguments should be allowed. The syntax **kwargs: Expand[T] has been proposed for this [6].

Acknowledgements

David Foster contributed the initial implementation of TypedDict types to mypy. Improvements to the implementation have been contributed by at least the author (Jukka Lehtosalo), Ivan Levkivskyi, Gareth T, Michael Lee, Dominik Miedzinski, Roy Williams and Max Moroz.

References

[1] Dataclasses JSON (https://github.com/lidatong/dataclasses-json)
[2] mypy (http://www.mypy-lang.org/)
[3] typing_extensions (https://github.com/python/typing/tree/master/typing_extensions)
[4] mypy_extensions (https://github.com/python/mypy_extensions)
[5] typing_inspect (https://github.com/ilevkivskyi/typing_inspect)
[6] https://github.com/python/mypy/issues/4441

Copyright

This document has been placed in the public domain.
python-peps
2024-10-18T13:23:34.223993
2019-03-20T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0589/", "authors": [ "Jukka Lehtosalo" ], "pep_number": "0589", "pandoc_version": "3.5" }
0431
PEP: 431
Title: Time zone support improvements
Version: $Revision$
Last-Modified: $Date$
Author: Lennart Regebro <[email protected]>
BDFL-Delegate: Barry Warsaw <[email protected]>
Status: Superseded
Type: Standards Track
Content-Type: text/x-rst
Created: 11-Dec-2012
Post-History: 11-Dec-2012, 28-Dec-2012, 28-Jan-2013
Superseded-By: 615

Abstract

This PEP proposes the implementation of concrete time zone support in the Python standard library, and also improvements to the time zone API to deal with ambiguous time specifications during DST changes.

Withdrawal

After lengthy discussion it has turned out that the things I thought were problems in datetime's implementation are intentional. Those include completely ignoring DST transitions when doing datetime arithmetic. That makes the is_dst flags part of this PEP pointless, as they would have no useful function. datetime by design does not distinguish ambiguous datetimes and never will.

I therefore withdraw this PEP.

UPDATE: PEP 615, "Support for the IANA Time Zone Database in the Standard Library", added the zoneinfo module to Python 3.9 and superseded this PEP.

Proposal

Concrete time zone support

The time zone support in Python has no concrete implementation in the standard library outside of a tzinfo base class that supports fixed offsets. To properly support time zones you need to include a database of all time zones, both current and historical, including daylight saving changes. But such information changes frequently, so even if we include the latest information in a Python release, that information would be outdated just a few months later.

Time zone support has therefore only been available through two third-party modules, pytz and dateutil, both of which include and wrap the "zoneinfo" database. This database, also called "tz" or "the Olson database", is the de facto standard time zone database, and it is included in most Unix and Unix-like operating systems, including OS X.

This gives us the opportunity to include the code that supports the zoneinfo data in the standard library, but by default use the operating system's copy of the data, which typically will be kept updated by the updating mechanism of the operating system or distribution.

For those who have an operating system that does not include the zoneinfo database, for example Windows, the Python source distribution will include a copy of the zoneinfo database, and a distribution containing the latest zoneinfo database will also be available at the Python Package Index, so it can be easily installed with Python packaging tools such as easy_install or pip. This could also be done on Unices that are no longer receiving updates and therefore have an outdated database.

With such a mechanism Python would have full time zone support in the standard library on any platform, and a simple package installation would provide an updated time zone database on those platforms where the zoneinfo database isn't included, such as Windows, or on platforms where OS updates are no longer provided.

The time zone support will be implemented by making the datetime module into a package, and adding time zone support to datetime based on Stuart Bishop's pytz module.
Getting the local time zone

On Unix there is no standard way of finding the name of the time zone that is being used. All the information that is available is the time zone abbreviations, such as EST and PDT, but many of those abbreviations are ambiguous, and therefore you can't rely on them to figure out which time zone you are located in. There is however a standard for finding the compiled time zone information, since it's located in /etc/localtime. Therefore, it is possible to create a local time zone object with the correct time zone information even though you don't know the name of the time zone. A function in datetime should be provided to return the local time zone.

The support for this will be made by integrating Lennart Regebro's tzlocal module into the new datetime module. For Windows it will look up the local Windows time zone name, and use a mapping between Windows time zone names and zoneinfo time zone names provided by the Unicode consortium to convert that to a zoneinfo time zone. The mapping should be updated before each major or bugfix release; scripts for doing so will be provided in the Tools/ directory.

Ambiguous times

When changing over from daylight savings time (DST) the clock is turned back one hour. This means that the times during that hour happen twice, once with DST and then once without DST. Similarly, when changing to daylight savings time, one hour goes missing.

The current time zone API cannot differentiate between the two ambiguous times during a change from DST. For example, in Stockholm the time of 2012-10-28 02:00:00 happens twice, both at UTC 2012-10-28 00:00:00 and also at UTC 2012-10-28 01:00:00. The current time zone API cannot disambiguate this, and therefore it's unclear which time should be returned:

    # This could be either 00:00 or 01:00 UTC:
    >>> dt = datetime(2012, 10, 28, 2, 0, tzinfo=zoneinfo('Europe/Stockholm'))
    # But we cannot specify which:
    >>> dt.astimezone(zoneinfo('UTC'))
    datetime.datetime(2012, 10, 28, 1, 0, tzinfo=<UTC>)

pytz solved this problem by adding is_dst parameters to several methods of the tzinfo objects to make it possible to disambiguate times when this is desired. This PEP proposes to add these is_dst parameters to the relevant methods of the datetime API, and therefore add this functionality directly to datetime. This is likely the hardest part of this PEP, as it involves updating the C version of the datetime library; it requires writing new code, not just reorganizing existing external libraries.

Implementation API

The zoneinfo database

The latest version of the zoneinfo database should exist in the Lib/tzdata directory of the Python source control system. This copy of the database should be updated before every Python feature and bug-fix release, but not for releases of Python versions that are in security-fix-only mode. Scripts to update the database will be provided in Tools/, and the release instructions will be updated to include this update.

New configure options --enable-internal-timezone-database and --disable-internal-timezone-database will be implemented to enable and disable the installation of this database when installing from source. A source install will default to installing it.

Binary installers for systems that have a system-provided zoneinfo database may skip installing the included database since it would never be used for these platforms. For other platforms, for example Windows, binary installers must install the included database.
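As a historical footnote: the ambiguity problem described under Ambiguous times above was eventually solved in the standard library by PEP 495 (the fold attribute) together with PEP 615's zoneinfo module. A minimal sketch, runnable on Python 3.9+, showing how fold plays the role this PEP proposed for is_dst:

    # Runnable on Python 3.9+, where zoneinfo is in the standard library.
    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo

    tz = ZoneInfo('Europe/Stockholm')

    # 2012-10-28 02:00 local time happened twice; fold selects which one.
    first = datetime(2012, 10, 28, 2, 0, tzinfo=tz, fold=0)   # still DST (CEST)
    second = datetime(2012, 10, 28, 2, 0, tzinfo=tz, fold=1)  # after DST ends (CET)

    print(first.astimezone(timezone.utc))   # 2012-10-28 00:00:00+00:00
    print(second.astimezone(timezone.utc))  # 2012-10-28 01:00:00+00:00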
Changes in the datetime-module

The public API of the new time zone support contains one new class, one new function, one new exception and four new collections. In addition to this, several methods on the datetime object get a new is_dst parameter.

New class dsttimezone

This class provides a concrete implementation of the tzinfo base class that implements DST support.

New function zoneinfo(name=None, db_path=None)

This function takes a name string that must be a string specifying a valid zoneinfo time zone, e.g. "US/Eastern", "Europe/Warsaw" or "Etc/GMT". If not given, the local time zone will be looked up. If an invalid zone name is given, or the local time zone cannot be retrieved, the function raises UnknownTimeZoneError.

The function also takes an optional path to the location of the zoneinfo database which should be used. If not specified, the function will look for databases in the following order:

1. Check if the tzdata-update module is installed, and then use that database.

2. Use the database in /usr/share/zoneinfo, if it exists.

3. Use the Python-provided database in Lib/tzdata.

If no database is found, an UnknownTimeZoneError or subclass thereof will be raised with a message explaining that no zoneinfo database can be found, but that you can install one with the tzdata-update package.

New parameter is_dst

A new is_dst parameter is added to several methods to handle time ambiguity during DST changeovers.

- tzinfo.utcoffset(dt, is_dst=False)
- tzinfo.dst(dt, is_dst=False)
- tzinfo.tzname(dt, is_dst=False)
- datetime.astimezone(tz, is_dst=False)

The is_dst parameter can be False (default), True, or None.

False will specify that the given datetime should be interpreted as not happening during daylight savings time, i.e. that the time specified is after the change from DST. This is the default, to preserve existing behavior.

True will specify that the given datetime should be interpreted as happening during daylight savings time, i.e. that the time specified is before the change from DST.

None will raise an AmbiguousTimeError exception if the time specified was during a DST changeover. It will also raise a NonExistentTimeError if a time is specified during the "missing time" in a change to DST.

New exceptions

- UnknownTimeZoneError

  This exception is a subclass of KeyError and is raised when giving a time zone specification that can't be found:

      >>> datetime.zoneinfo('Europe/New_York')
      Traceback (most recent call last):
      ...
      UnknownTimeZoneError: There is no time zone called 'Europe/New_York'

- InvalidTimeError

  This exception serves as a base for AmbiguousTimeError and NonExistentTimeError, to enable you to trap these two separately. It will subclass from ValueError, so that you can catch these errors together with inputs like the 29th of February 2011.

- AmbiguousTimeError

  This exception is raised when giving a datetime specification that is ambiguous while setting is_dst to None:

      >>> datetime(2012, 10, 28, 2, 0, tzinfo=zoneinfo('Europe/Stockholm'), is_dst=None)
      Traceback (most recent call last):
      ...
      AmbiguousTimeError: 2012-10-28 02:00:00 is ambiguous in time zone Europe/Stockholm

- NonExistentTimeError

  This exception is raised when giving a datetime specification for a time that, due to daylight saving, does not exist, while setting is_dst to None:

      >>> datetime(2012, 3, 25, 2, 0, tzinfo=zoneinfo('Europe/Stockholm'), is_dst=None)
      Traceback (most recent call last):
      ...
      NonExistentTimeError: 2012-03-25 02:00:00 does not exist in time zone Europe/Stockholm
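The proposed exceptions were never implemented, but the same two conditions can be detected with today's standard library via PEP 495's fold attribute. A hypothetical helper (classify is not a real API, just a sketch), runnable on Python 3.9+:

    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo

    def classify(naive: datetime, tz: ZoneInfo) -> str:
        """Classify a naive wall-clock time as 'unambiguous', 'ambiguous',
        or 'nonexistent' in the given zone, using PEP 495 fold semantics."""
        d0 = naive.replace(tzinfo=tz, fold=0)
        d1 = naive.replace(tzinfo=tz, fold=1)
        if d0.utcoffset() == d1.utcoffset():
            return 'unambiguous'   # both folds agree: a normal time
        # The folds disagree around a transition. If the wall time survives
        # a round trip through UTC it occurs twice; otherwise it was skipped.
        round_trip = d0.astimezone(timezone.utc).astimezone(tz)
        if round_trip.replace(tzinfo=None) == naive:
            return 'ambiguous'     # this PEP would raise AmbiguousTimeError
        return 'nonexistent'       # this PEP would raise NonExistentTimeError

    tz = ZoneInfo('Europe/Stockholm')
    print(classify(datetime(2012, 10, 28, 2, 0), tz))  # ambiguous
    print(classify(datetime(2012, 3, 25, 2, 0), tz))   # nonexistent
    print(classify(datetime(2012, 6, 1, 12, 0), tz))   # unambiguous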
New collections

- all_timezones is the exhaustive list of the time zone names that can be used, listed alphabetically.

- common_timezones is a list of useful, current time zones, listed alphabetically.

The tzdata-update-package

The zoneinfo database will be packaged for easy installation with easy_install/pip/buildout. This package will not install any Python code, and will not contain any Python code except that which is needed for installation. It will be kept updated with the same tools as the internal database, but released whenever the zoneinfo database is updated, and use the same version schema.

Differences from the pytz API

- pytz has the functions localize() and normalize() to work around the fact that tzinfo doesn't have is_dst. When is_dst is implemented directly in datetime.tzinfo they are no longer needed.

- The timezone() function is called zoneinfo() to avoid clashing with the timezone class introduced in Python 3.2.

- zoneinfo() will return the local time zone if called without arguments.

- The class pytz.StaticTzInfo is there to provide the is_dst support for static time zones. When is_dst support is included in datetime.tzinfo it is no longer needed.

- InvalidTimeError subclasses from ValueError.

Resources

- http://pytz.sourceforge.net/
- http://pypi.python.org/pypi/tzlocal
- http://pypi.python.org/pypi/python-dateutil
- http://unicode.org/cldr/data/common/supplemental/windowsZones.xml

Copyright

This document has been placed in the public domain.
python-peps
2024-10-18T13:23:34.242973
2012-12-11T00:00:00
{ "license": "Public Domain", "url": "https://peps.python.org/pep-0431/", "authors": [ "Lennart Regebro" ], "pep_number": "0431", "pandoc_version": "3.5" }