\n\n\"\"\"\ntemplate_translatable = gettext_lazy(\"Index of %(directory)s\")\n\n\ndef directory_index(path, fullpath):\n    try:\n        t = loader.select_template([\n            'static/directory_index.html',\n            'static/directory_index',\n        ])\n    except TemplateDoesNotExist:\n        t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)\n        c = Context()\n    else:\n        c = {}\n    files = []\n    for f in fullpath.iterdir():\n        if not f.name.startswith('.'):\n            url = str(f.relative_to(fullpath))\n            if f.is_dir():\n                url += '/'\n            files.append(url)\n    c.update({\n        'directory': path + '/',\n        'file_list': files,\n    })\n    return HttpResponse(t.render(c))\n\n\ndef was_modified_since(header=None, mtime=0, size=0):\n    \"\"\"\n    Was something modified since the user last downloaded it?\n\n    header\n      This is the value of the If-Modified-Since header. If this is None,\n      I'll just return True.\n\n    mtime\n      This is the modification time of the item we're talking about.\n\n    size\n      This is the size of the item we're talking about.\n    \"\"\"\n    try:\n        if header is None:\n            raise ValueError\n        matches = re.match(r\"^([^;]+)(; length=([0-9]+))?$\", header,\n                           re.IGNORECASE)\n        header_mtime = parse_http_date(matches[1])\n        header_len = matches[3]\n        if header_len and int(header_len) != size:\n            raise ValueError\n        if int(mtime) > header_mtime:\n            raise ValueError\n    except (AttributeError, ValueError, OverflowError):\n        return True\n    return False\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2708,"cells":{"repo_name":{"kind":"string","value":"ajvpot/CTFd"},"path":{"kind":"string","value":"migrations/versions/75e8ab9a0014_add_fields_and_fieldentries_tables.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"1867"},"content":{"kind":"string","value":"\"\"\"Add Fields and FieldEntries tables\n\nRevision ID: 75e8ab9a0014\nRevises: 0366ba6575ca\nCreate Date: 2020-08-19 00:36:17.579497\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"75e8ab9a0014\"\ndown_revision = \"0366ba6575ca\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"fields\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"name\", sa.Text(), nullable=True),\n sa.Column(\"type\", sa.String(length=80), nullable=True),\n sa.Column(\"field_type\", sa.String(length=80), nullable=True),\n sa.Column(\"description\", sa.Text(), nullable=True),\n sa.Column(\"required\", sa.Boolean(), nullable=True),\n sa.Column(\"public\", sa.Boolean(), nullable=True),\n sa.Column(\"editable\", sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_table(\n \"field_entries\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"type\", sa.String(length=80), nullable=True),\n sa.Column(\"value\", sa.JSON(), nullable=True),\n sa.Column(\"field_id\", sa.Integer(), nullable=True),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"team_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint([\"field_id\"], [\"fields.id\"], ondelete=\"CASCADE\"),\n sa.ForeignKeyConstraint([\"team_id\"], [\"teams.id\"], ondelete=\"CASCADE\"),\n sa.ForeignKeyConstraint([\"user_id\"], [\"users.id\"], ondelete=\"CASCADE\"),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"field_entries\")\n op.drop_table(\"fields\")\n # ### end Alembic commands ###\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2709,"cells":{"repo_name":{"kind":"string","value":"WorldViews/Spirals"},"path":{"kind":"string","value":"KinPy/KinOSCWatcher.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"5277"},"content":{"kind":"string","value":"\nimport os, socket, time\nimport threading\n#import MessageBoard\nimport traceback\nimport OSC\n\nOSC_SERVER = None\n\nALL_JOINTS = {\n 'HandRight': 'RIGHT_HAND',\n 'HandLeft': 'LEFT_HAND',\n 'WristRight': 'RIGHT_WRIST',\n 'WristLeft': 'LEFT_WRIST',\n 'ElbowRight': 'RIGHT_ELBOW',\n 'ElbowLeft': 'LEFT_ELBOW',\n 'ShoulderRight': 'RIGHT_SHOULDER',\n 'ShoulderLeft': 'LEFT_SHOULDER',\n 'Neck': 'NECK',\n 'Head': 'HEAD',\n 'SpineMid': 'MID_SPINE',\n 'SpineBase': 'BASE_SPINE',\n 'HipRight': 'RIGHT_HIP',\n 'HipLeft': 'LEFT_HIP',\n 'KneeRight': 'RIGHT_KNEE',\n 'KneeLeft': 'LEFT_KNEE',\n 'AnkleRight': 'RIGHT_ANKLE',\n 'AnkleLeft': 'LEFT_ANKLE',\n 'FootRight': 'RIGHT_FOOT',\n 'FootLeft': 'LEFT_FOOT'\n}\n\nJOINTS = {\n 'HandRight': 'RIGHT_HAND',\n 'HandLeft': 'LEFT_HAND',\n 'ElbowRight': 'RIGHT_ELBOW',\n 'ElbowLeft': 'LEFT_ELBOW',\n 'Head': 'HEAD'\n }\nKINECT_CONTROL = None\n\n\"\"\"\nThis is a simple class for holding the message associate\nwith a body, and some other information such as body num\nor timing.\n\"\"\"\nclass Body:\n numBodies = 0\n bodyById = {}\n\n @staticmethod\n def getBody(bodyId):\n if bodyId in Body.bodyById:\n return Body.bodyById[bodyId]\n# MyOSCHandler.numPeople += 1\n# personNum = MyOSCHandler.numPeople\n body = Body(bodyId)\n Body.bodyById[bodyId] = body\n return body\n\n def __init__(self, id):\n Body.numBodies += 1\n self.bodyId = id\n self.personNum = Body.numBodies\n self.msg = None\n\n def setJoint(self, joint, xyz, trackState):\n \"\"\"\n This gets called with a joint position and acculumates\n the joint information in a message. When this gets called\n with a joint that is already in the message, it is assumed\n the message is \"complete\" (i.e. 
has a complete set of\n the joints being watched) and a single message is sent\n with all those joints.\n \"\"\"\n global OSC_SERVER\n #print \"buildMessage\", bodyId, joint, xyz\n if JOINTS != None:\n jname = JOINTS[joint]\n else:\n jname = joint\n msg = self.msg\n if msg != None and jname in msg:\n #print \"sending message!!!!\", msg\n if OSC_SERVER.kinSkelHandler:\n OSC_SERVER.kinSkelHandler(msg)\n msg = None\n if msg == None:\n msg = {'msgType':'kinect.skeleton.pose',\n 'personNum': self.personNum}\n msg[jname] = xyz\n c = .2\n if trackState == 'Tracked':\n c = 1.0\n msg[\"%s_c\" % jname] = c\n self.msg = msg\n\n\n\nclass MyOSCHandler(OSC.OSCRequestHandler):\n def dispatchMessage(self, pattern, tags, data):\n parts = pattern.split(\"/\")\n if len(parts) != 5:\n print \"Unexpected number of parts\"\n return []\n bodyId = parts[2]\n if parts[3] == \"hands\":\n if tags != \"ss\":\n print \"Unexpected format\", tags\n print \"pattern:\", pattern\n return []\n elif parts[3] == \"joints\":\n joint = parts[4]\n if tags != \"fffs\":\n print \"Unexpected format\", tags\n print \"pattern:\", pattern\n return []\n if JOINTS and joint not in JOINTS:\n return []\n #print \"data: %s\\n\" % (data,)\n x,y,z,trackState = data\n pos = 1000.0*x, 1000.0*y, 1000.0*z\n body = Body.getBody(bodyId)\n body.setJoint(joint, pos, trackState)\n else:\n print \"Unexpected pattern\", pattern\n return []\n if self.server.kinJointHandler:\n body = Body.getBody(bodyId)\n msg = {'msgType': 'joint', 'personNum': body.personNum, 'joint': joint,\n 'bodyId': bodyId, 'pos': [x,y,z]}\n self.server.kinJointHandler(msg)\n# if SERVER:\n# SERVER.sendMessageToAllClients(msg)\n return []\n\n\n#class MyOSCServer(OSC.ThreadingOSCServer):\nclass MyOSCServer(OSC.OSCServer):\n RequestHandlerClass = MyOSCHandler\n\ndef bodyMsgHandler(msg):\n print msg\n\nOSC_HOST_ADDR = None\nOSC_PORT = 12345\n\ndef getOSC_ADDR():\n global OSC_HOST_ADDR\n if not OSC_HOST_ADDR:\n host = socket.gethostname()\n OSC_HOST_ADDR = socket.gethostbyname(host)\n \"\"\"\n path = \"%s.OSC_PARAMS.json\"\n if os.path.exists(path):\n try:\n params = json.load(file(path))\n return tuple(params['OSC_ADDR'])\n except:\n traceback.print_exc()\n return OSC_ADDR\n \"\"\"\n return OSC_HOST_ADDR, OSC_PORT\n\ndef startOSC(kinSkelHandler=None, kinJointHandler=None):\n global OSC_SERVER\n addr = getOSC_ADDR()\n print \"Using addr:\", addr\n s = MyOSCServer(addr)\n OSC_SERVER = s\n s.kinSkelHandler = kinSkelHandler\n s.kinJointHandler = kinJointHandler\n #s.app = app\n s.addMsgHandler(\"/bodies\", bodyMsgHandler)\n t = threading.Thread(target=s.serve_forever)\n t.start()\n #t.setDaemon(True)\n #s.serve_forever()\n\ndef kinSkelHandler(msg):\n if 0:\n print msg\n\ndef kinJointHandler(msg):\n if 0:\n print msg\n\ndef run(setupServer=True):\n startOSC(kinSkelHandler, kinJointHandler)\n while 1:\n time.sleep(1)\n\n\nif __name__ == '__main__':\n run()\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2710,"cells":{"repo_name":{"kind":"string","value":"int19h/PTVS"},"path":{"kind":"string","value":"Python/Product/Miniconda/Miniconda3-x64/Lib/heapq.py"},"copies":{"kind":"string","value":"14"},"size":{"kind":"string","value":"23017"},"content":{"kind":"string","value":"\"\"\"Heap queue algorithm (a.k.a. priority queue).\n\nHeaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\nall k, counting elements from 0. For the sake of comparison,\nnon-existing elements are considered to be infinite. 
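(For example, [1, 3, 2, 7, 4] is a heap: a[0]=1 <= a[1]=3 and a[0] <= a[2]=2,\nwhile a[1]=3 <= a[3]=7 and a[1] <= a[4]=4.) 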
The interesting\nproperty of a heap is that a[0] is always its smallest element.\n\nUsage:\n\nheap = [] # creates an empty heap\nheappush(heap, item) # pushes a new item on the heap\nitem = heappop(heap) # pops the smallest item from the heap\nitem = heap[0] # smallest item on the heap without popping it\nheapify(x) # transforms list into a heap, in-place, in linear time\nitem = heapreplace(heap, item) # pops and returns smallest item, and adds\n # new item; the heap size is unchanged\n\nOur API differs from textbook heap algorithms as follows:\n\n- We use 0-based indexing. This makes the relationship between the\n index for a node and the indexes for its children slightly less\n obvious, but is more suitable since Python uses 0-based indexing.\n\n- Our heappop() method returns the smallest item, not the largest.\n\nThese two make it possible to view the heap as a regular Python list\nwithout surprises: heap[0] is the smallest item, and heap.sort()\nmaintains the heap invariant!\n\"\"\"\n\n# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger\n\n__about__ = \"\"\"Heap queues\n\n[explanation by François Pinard]\n\nHeaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\nall k, counting elements from 0. For the sake of comparison,\nnon-existing elements are considered to be infinite. The interesting\nproperty of a heap is that a[0] is always its smallest element.\n\nThe strange invariant above is meant to be an efficient memory\nrepresentation for a tournament. The numbers below are `k', not a[k]:\n\n 0\n\n 1 2\n\n 3 4 5 6\n\n 7 8 9 10 11 12 13 14\n\n 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30\n\n\nIn the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In\na usual binary tournament we see in sports, each cell is the winner\nover the two cells it tops, and we can trace the winner down the tree\nto see all opponents s/he had. However, in many computer applications\nof such tournaments, we do not need to trace the history of a winner.\nTo be more memory efficient, when a winner is promoted, we try to\nreplace it by something else at a lower level, and the rule becomes\nthat a cell and the two cells it tops contain three different items,\nbut the top cell \"wins\" over the two topped cells.\n\nIf this heap invariant is protected at all time, index 0 is clearly\nthe overall winner. The simplest algorithmic way to remove it and\nfind the \"next\" winner is to move some loser (let's say cell 30 in the\ndiagram above) into the 0 position, and then percolate this new 0 down\nthe tree, exchanging values, until the invariant is re-established.\nThis is clearly logarithmic on the total number of items in the tree.\nBy iterating over all items, you get an O(n ln n) sort.\n\nA nice feature of this sort is that you can efficiently insert new\nitems while the sort is going on, provided that the inserted items are\nnot \"better\" than the last 0'th element you extracted. This is\nespecially useful in simulation contexts, where the tree holds all\nincoming events, and the \"win\" condition means the smallest scheduled\ntime. When an event schedule other events for execution, they are\nscheduled into the future, so they can easily go into the heap. 
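For\ninstance, events can be pushed as (time, action) pairs and popped back in\nchronological order -- a minimal sketch:\n\n>>> import heapq\n>>> events = []\n>>> heapq.heappush(events, (5, 'stop'))\n>>> heapq.heappush(events, (1, 'start'))\n>>> heapq.heappop(events)\n(1, 'start')\n\n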
So, a\nheap is a good structure for implementing schedulers (this is what I\nused for my MIDI sequencer :-).\n\nVarious structures for implementing schedulers have been extensively\nstudied, and heaps are good for this, as they are reasonably speedy,\nthe speed is almost constant, and the worst case is not much different\nthan the average case. However, there are other representations which\nare more efficient overall, yet the worst cases might be terrible.\n\nHeaps are also very useful in big disk sorts. You most probably all\nknow that a big sort implies producing \"runs\" (which are pre-sorted\nsequences, which size is usually related to the amount of CPU memory),\nfollowed by a merging passes for these runs, which merging is often\nvery cleverly organised[1]. It is very important that the initial\nsort produces the longest runs possible. Tournaments are a good way\nto that. If, using all the memory available to hold a tournament, you\nreplace and percolate items that happen to fit the current run, you'll\nproduce runs which are twice the size of the memory for random input,\nand much better for input fuzzily ordered.\n\nMoreover, if you output the 0'th item on disk and get an input which\nmay not fit in the current tournament (because the value \"wins\" over\nthe last output value), it cannot fit in the heap, so the size of the\nheap decreases. The freed memory could be cleverly reused immediately\nfor progressively building a second heap, which grows at exactly the\nsame rate the first heap is melting. When the first heap completely\nvanishes, you switch heaps and start a new run. Clever and quite\neffective!\n\nIn a word, heaps are useful memory structures to know. I use them in\na few applications, and I think it is good to keep a `heap' module\naround. :-)\n\n--------------------\n[1] The disk balancing algorithms which are current, nowadays, are\nmore annoying than clever, and this is a consequence of the seeking\ncapabilities of the disks. On devices which cannot seek, like big\ntape drives, the story was quite different, and one had to be very\nclever to ensure (far in advance) that each tape movement will be the\nmost effective possible (that is, will best participate at\n\"progressing\" the merge). Some tapes were even able to read\nbackwards, and this was also used to avoid the rewinding time.\nBelieve me, real good tape sorts were quite spectacular to watch!\nFrom all times, sorting has always been a Great Art! :-)\n\"\"\"\n\n__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',\n 'nlargest', 'nsmallest', 'heappushpop']\n\ndef heappush(heap, item):\n \"\"\"Push item onto heap, maintaining the heap invariant.\"\"\"\n heap.append(item)\n _siftdown(heap, 0, len(heap)-1)\n\ndef heappop(heap):\n \"\"\"Pop the smallest item off the heap, maintaining the heap invariant.\"\"\"\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt\n\ndef heapreplace(heap, item):\n \"\"\"Pop and return the current smallest value, and add the new item.\n\n This is more efficient than heappop() followed by heappush(), and can be\n more appropriate when using a fixed-size heap. Note that the value\n returned may be larger than item! 
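(heapreplace pops the root unconditionally, even when the incoming item\n    would sort before everything already in the heap.)\n    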
That constrains reasonable uses of\n this routine unless written as part of a conditional replacement:\n\n if item > heap[0]:\n item = heapreplace(heap, item)\n \"\"\"\n returnitem = heap[0] # raises appropriate IndexError if heap is empty\n heap[0] = item\n _siftup(heap, 0)\n return returnitem\n\ndef heappushpop(heap, item):\n \"\"\"Fast version of a heappush followed by a heappop.\"\"\"\n if heap and heap[0] < item:\n item, heap[0] = heap[0], item\n _siftup(heap, 0)\n return item\n\ndef heapify(x):\n \"\"\"Transform list into a heap, in-place, in O(len(x)) time.\"\"\"\n n = len(x)\n # Transform bottom-up. The largest index there's any point to looking at\n # is the largest with a child index in-range, so must have 2*i + 1 < n,\n # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so\n # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is\n # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.\n for i in reversed(range(n//2)):\n _siftup(x, i)\n\ndef _heappop_max(heap):\n \"\"\"Maxheap version of a heappop.\"\"\"\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup_max(heap, 0)\n return returnitem\n return lastelt\n\ndef _heapreplace_max(heap, item):\n \"\"\"Maxheap version of a heappop followed by a heappush.\"\"\"\n returnitem = heap[0] # raises appropriate IndexError if heap is empty\n heap[0] = item\n _siftup_max(heap, 0)\n return returnitem\n\ndef _heapify_max(x):\n \"\"\"Transform list into a maxheap, in-place, in O(len(x)) time.\"\"\"\n n = len(x)\n for i in reversed(range(n//2)):\n _siftup_max(x, i)\n\n# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos\n# is the index of a leaf with a possibly out-of-order value. Restore the\n# heap invariant.\ndef _siftdown(heap, startpos, pos):\n newitem = heap[pos]\n # Follow the path to the root, moving parents down until finding a place\n # newitem fits.\n while pos > startpos:\n parentpos = (pos - 1) >> 1\n parent = heap[parentpos]\n if newitem < parent:\n heap[pos] = parent\n pos = parentpos\n continue\n break\n heap[pos] = newitem\n\n# The child indices of heap index pos are already heaps, and we want to make\n# a heap at index pos too. We do this by bubbling the smaller child of\n# pos up (and so on with that child's children, etc) until hitting a leaf,\n# then using _siftdown to move the oddball originally at index pos into place.\n#\n# We *could* break out of the loop as soon as we find a pos where newitem <=\n# both its children, but turns out that's not a good idea, and despite that\n# many books write the algorithm that way. During a heap pop, the last array\n# element is sifted in, and that tends to be large, so that comparing it\n# against values starting from the root usually doesn't pay (= usually doesn't\n# get us out of the loop early). See Knuth, Volume 3, where this is\n# explained and quantified in an exercise.\n#\n# Cutting the # of comparisons is important, since these routines have no\n# way to extract \"the priority\" from an array element, so that intelligence\n# is likely to be hiding in custom comparison methods, or in array elements\n# storing (priority, record) tuples. Comparisons are thus potentially\n# expensive.\n#\n# On random arrays of length 1000, making this change cut the number of\n# comparisons made by heapify() a little, and those made by exhaustive\n# heappop() a lot, in accord with theory. 
Here are typical results from 3\n# runs (3 just to demonstrate how small the variance is):\n#\n# Compares needed by heapify Compares needed by 1000 heappops\n# -------------------------- --------------------------------\n# 1837 cut to 1663 14996 cut to 8680\n# 1855 cut to 1659 14966 cut to 8678\n# 1847 cut to 1660 15024 cut to 8703\n#\n# Building the heap by using heappush() 1000 times instead required\n# 2198, 2148, and 2219 compares: heapify() is more efficient, when\n# you can use it.\n#\n# The total compares needed by list.sort() on the same lists were 8627,\n# 8627, and 8632 (this should be compared to the sum of heapify() and\n# heappop() compares): list.sort() is (unsurprisingly!) more efficient\n# for sorting.\n\ndef _siftup(heap, pos):\n endpos = len(heap)\n startpos = pos\n newitem = heap[pos]\n # Bubble up the smaller child until hitting a leaf.\n childpos = 2*pos + 1 # leftmost child position\n while childpos < endpos:\n # Set childpos to index of smaller child.\n rightpos = childpos + 1\n if rightpos < endpos and not heap[childpos] < heap[rightpos]:\n childpos = rightpos\n # Move the smaller child up.\n heap[pos] = heap[childpos]\n pos = childpos\n childpos = 2*pos + 1\n # The leaf at pos is empty now. Put newitem there, and bubble it up\n # to its final resting place (by sifting its parents down).\n heap[pos] = newitem\n _siftdown(heap, startpos, pos)\n\ndef _siftdown_max(heap, startpos, pos):\n 'Maxheap variant of _siftdown'\n newitem = heap[pos]\n # Follow the path to the root, moving parents down until finding a place\n # newitem fits.\n while pos > startpos:\n parentpos = (pos - 1) >> 1\n parent = heap[parentpos]\n if parent < newitem:\n heap[pos] = parent\n pos = parentpos\n continue\n break\n heap[pos] = newitem\n\ndef _siftup_max(heap, pos):\n 'Maxheap variant of _siftup'\n endpos = len(heap)\n startpos = pos\n newitem = heap[pos]\n # Bubble up the larger child until hitting a leaf.\n childpos = 2*pos + 1 # leftmost child position\n while childpos < endpos:\n # Set childpos to index of larger child.\n rightpos = childpos + 1\n if rightpos < endpos and not heap[rightpos] < heap[childpos]:\n childpos = rightpos\n # Move the larger child up.\n heap[pos] = heap[childpos]\n pos = childpos\n childpos = 2*pos + 1\n # The leaf at pos is empty now. 
Put newitem there, and bubble it up\n # to its final resting place (by sifting its parents down).\n heap[pos] = newitem\n _siftdown_max(heap, startpos, pos)\n\ndef merge(*iterables, key=None, reverse=False):\n '''Merge multiple sorted inputs into a single sorted output.\n\n Similar to sorted(itertools.chain(*iterables)) but returns a generator,\n does not pull the data into memory all at once, and assumes that each of\n the input streams is already sorted (smallest to largest).\n\n >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))\n [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]\n\n If *key* is not None, applies a key function to each element to determine\n its sort order.\n\n >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))\n ['dog', 'cat', 'fish', 'horse', 'kangaroo']\n\n '''\n\n h = []\n h_append = h.append\n\n if reverse:\n _heapify = _heapify_max\n _heappop = _heappop_max\n _heapreplace = _heapreplace_max\n direction = -1\n else:\n _heapify = heapify\n _heappop = heappop\n _heapreplace = heapreplace\n direction = 1\n\n if key is None:\n for order, it in enumerate(map(iter, iterables)):\n try:\n next = it.__next__\n h_append([next(), order * direction, next])\n except StopIteration:\n pass\n _heapify(h)\n while len(h) > 1:\n try:\n while True:\n value, order, next = s = h[0]\n yield value\n s[0] = next() # raises StopIteration when exhausted\n _heapreplace(h, s) # restore heap condition\n except StopIteration:\n _heappop(h) # remove empty iterator\n if h:\n # fast case when only a single iterator remains\n value, order, next = h[0]\n yield value\n yield from next.__self__\n return\n\n for order, it in enumerate(map(iter, iterables)):\n try:\n next = it.__next__\n value = next()\n h_append([key(value), order * direction, value, next])\n except StopIteration:\n pass\n _heapify(h)\n while len(h) > 1:\n try:\n while True:\n key_value, order, value, next = s = h[0]\n yield value\n value = next()\n s[0] = key(value)\n s[2] = value\n _heapreplace(h, s)\n except StopIteration:\n _heappop(h)\n if h:\n key_value, order, value, next = h[0]\n yield value\n yield from next.__self__\n\n\n# Algorithm notes for nlargest() and nsmallest()\n# ==============================================\n#\n# Make a single pass over the data while keeping the k most extreme values\n# in a heap. Memory consumption is limited to keeping k values in a list.\n#\n# Measured performance for random inputs:\n#\n# number of comparisons\n# n inputs k-extreme values (average of 5 trials) % more than min()\n# ------------- ---------------- --------------------- -----------------\n# 1,000 100 3,317 231.7%\n# 10,000 100 14,046 40.5%\n# 100,000 100 105,749 5.7%\n# 1,000,000 100 1,007,751 0.8%\n# 10,000,000 100 10,009,401 0.1%\n#\n# Theoretical number of comparisons for k smallest of n random inputs:\n#\n# Step Comparisons Action\n# ---- -------------------------- ---------------------------\n# 1 1.66 * k heapify the first k-inputs\n# 2 n - k compare remaining elements to top of heap\n# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap\n# 4 k * lg2(k) - (k/2) final sort of the k most extreme values\n#\n# Combining and simplifying for a rough estimate gives:\n#\n# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))\n#\n# Computing the number of comparisons for step 3:\n# -----------------------------------------------\n# * For the i-th new value from the iterable, the probability of being in the\n# k most extreme values is k/i. 
For example, the probability of the 101st\n# value seen being in the 100 most extreme values is 100/101.\n# * If the value is a new extreme value, the cost of inserting it into the\n# heap is 1 + log(k, 2).\n# * The probability times the cost gives:\n# (k/i) * (1 + log(k, 2))\n# * Summing across the remaining n-k elements gives:\n# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))\n# * This reduces to:\n# (H(n) - H(k)) * k * (1 + log(k, 2))\n# * Where H(n) is the n-th harmonic number estimated by:\n# gamma = 0.5772156649\n# H(n) = log(n, e) + gamma + 1 / (2 * n)\n# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence\n# * Substituting the H(n) formula:\n# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)\n#\n# Worst-case for step 3:\n# ----------------------\n# In the worst case, the input data is reversed sorted so that every new element\n# must be inserted in the heap:\n#\n# comparisons = 1.66 * k + log(k, 2) * (n - k)\n#\n# Alternative Algorithms\n# ----------------------\n# Other algorithms were not used because they:\n# 1) Took much more auxiliary memory,\n# 2) Made multiple passes over the data.\n# 3) Made more comparisons in common cases (small k, large n, semi-random input).\n# See the more detailed comparison of approach at:\n# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest\n\ndef nsmallest(n, iterable, key=None):\n \"\"\"Find the n smallest elements in a dataset.\n\n Equivalent to: sorted(iterable, key=key)[:n]\n \"\"\"\n\n # Short-cut for n==1 is to use min()\n if n == 1:\n it = iter(iterable)\n sentinel = object()\n if key is None:\n result = min(it, default=sentinel)\n else:\n result = min(it, default=sentinel, key=key)\n return [] if result is sentinel else [result]\n\n # When n>=size, it's faster to use sorted()\n try:\n size = len(iterable)\n except (TypeError, AttributeError):\n pass\n else:\n if n >= size:\n return sorted(iterable, key=key)[:n]\n\n # When key is none, use simpler decoration\n if key is None:\n it = iter(iterable)\n # put the range(n) first so that zip() doesn't\n # consume one too many elements from the iterator\n result = [(elem, i) for i, elem in zip(range(n), it)]\n if not result:\n return result\n _heapify_max(result)\n top = result[0][0]\n order = n\n _heapreplace = _heapreplace_max\n for elem in it:\n if elem < top:\n _heapreplace(result, (elem, order))\n top, _order = result[0]\n order += 1\n result.sort()\n return [elem for (elem, order) in result]\n\n # General case, slowest method\n it = iter(iterable)\n result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]\n if not result:\n return result\n _heapify_max(result)\n top = result[0][0]\n order = n\n _heapreplace = _heapreplace_max\n for elem in it:\n k = key(elem)\n if k < top:\n _heapreplace(result, (k, order, elem))\n top, _order, _elem = result[0]\n order += 1\n result.sort()\n return [elem for (k, order, elem) in result]\n\ndef nlargest(n, iterable, key=None):\n \"\"\"Find the n largest elements in a dataset.\n\n Equivalent to: sorted(iterable, key=key, reverse=True)[:n]\n \"\"\"\n\n # Short-cut for n==1 is to use max()\n if n == 1:\n it = iter(iterable)\n sentinel = object()\n if key is None:\n result = max(it, default=sentinel)\n else:\n result = max(it, default=sentinel, key=key)\n return [] if result is sentinel else [result]\n\n # When n>=size, it's faster to use sorted()\n try:\n size = len(iterable)\n except (TypeError, AttributeError):\n pass\n else:\n if n >= size:\n return sorted(iterable, 
key=key, reverse=True)[:n]\n\n # When key is none, use simpler decoration\n if key is None:\n it = iter(iterable)\n result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]\n if not result:\n return result\n heapify(result)\n top = result[0][0]\n order = -n\n _heapreplace = heapreplace\n for elem in it:\n if top < elem:\n _heapreplace(result, (elem, order))\n top, _order = result[0]\n order -= 1\n result.sort(reverse=True)\n return [elem for (elem, order) in result]\n\n # General case, slowest method\n it = iter(iterable)\n result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]\n if not result:\n return result\n heapify(result)\n top = result[0][0]\n order = -n\n _heapreplace = heapreplace\n for elem in it:\n k = key(elem)\n if top < k:\n _heapreplace(result, (k, order, elem))\n top, _order, _elem = result[0]\n order -= 1\n result.sort(reverse=True)\n return [elem for (k, order, elem) in result]\n\n# If available, use C implementation\ntry:\n from _heapq import *\nexcept ImportError:\n pass\ntry:\n from _heapq import _heapreplace_max\nexcept ImportError:\n pass\ntry:\n from _heapq import _heapify_max\nexcept ImportError:\n pass\ntry:\n from _heapq import _heappop_max\nexcept ImportError:\n pass\n\n\nif __name__ == \"__main__\":\n\n import doctest\n print(doctest.testmod())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2711,"cells":{"repo_name":{"kind":"string","value":"xlqian/navitia"},"path":{"kind":"string","value":"release/script_release.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"16196"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Canal TP (www.canaltp.fr).\n# Help us simplify mobility and open public transport:\n# a non ending quest to the responsive locomotion way of traveling!\n#\n# LICENCE: This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# Stay tuned using\n# twitter @navitia\n# channel \`#navitia\` on riot https://riot.im/app/#/room/#navitia:matrix.org\n# https://groups.google.com/d/forum/navitia\n# www.navitia.io\nfrom __future__ import absolute_import, print_function, division\nimport os\n\nos.environ['LC_ALL'] = 'en_US'\nos.environ['GIT_PYTHON_TRACE'] = '1' # can be 0 (no trace), 1 (git commands) or full (git commands + git output)\n\nfrom git import *\nfrom datetime import datetime\nimport subprocess\nimport re\nfrom sys import exit, argv\nfrom shutil import copyfile\nfrom os import remove, stat\nimport codecs\nimport requests\nimport logging\n\n\ndef get_tag_name(version):\n    return \"v{maj}.{min}.{hf}\".format(maj=version[0], min=version[1], hf=version[2])\n\n\nclass ReleaseManager:\n    def __init__(self, release_type, remote_name=\"canalTP\"):\n        self.directory = \"..\"\n        self.changelog_filename = self.directory + \"/debian/changelog\"\n        self.data_version_filename = self.directory + \"/source/type/data.cpp\"\n        self.release_type = release_type\n        self.repo = Repo(self.directory)\n        self.git = self.repo.git\n\n        # we fetch latest version from remote\n        self.remote_name = remote_name\n\n        print(\"fetching from {}...\".format(remote_name))\n        self.repo.remote(remote_name).fetch(\"--tags\")\n\n        # and we update dev and release branches\n        print(\"rebasing dev and release...\")\n\n        # TODO quit on error\n        self.git.rebase(remote_name + \"/dev\", \"dev\")\n        self.dev_data_version = self.get_data_version()\n        remote_release = remote_name + \"/release\"\n        try:\n            # the option and the branch name must be separate git arguments\n            self.git.checkout(\"-B\", \"release\", remote_release)\n        except Exception as e:\n            print(\"Cannot checkout 'release':{}, creating from distant branch\".format(str(e)))\n            self.git.checkout(\"-b\", \"release\", remote_release)\n\n        print(\"checking that release was merged into dev...\")\n        unmerged = self.git.branch(\"--no-merged\", \"dev\", '--no-color')\n        is_release_unmerged = re.search(\" release(\\n|$)\", unmerged)\n        if is_release_unmerged:\n            print(is_release_unmerged.group(0))\n            print(\"ABORTING: {rem}/release branch was not merged in {rem}/dev\".format(rem=remote_name))\n            print(\"This is required before releasing. 
You may use (be careful):\")\n print(\"git checkout dev; git submodule update --recursive\")\n print(\"git merge release\")\n\n exit(1)\n\n print(\"current branch: {}\".format(self.repo.active_branch))\n\n self.version = None\n self.str_version = \"\"\n self.latest_tag = \"\"\n\n # if API rate limit exceeded use, get 'personal access token' on github then provide:\n # self.auth = ('user', 'pass')\n self.auth = None\n\n def get_data_version(self):\n f_data_version = codecs.open(self.data_version_filename, 'r', 'utf-8')\n version = None\n for line in f_data_version:\n res = re.search('^ *const .*data_version *= *([0-9]+) *;.*$', line)\n if res:\n version = res.group(1)\n break\n\n if version is None:\n print(\"ABORTING: data_version could not be retrieved from {f}\".format(f=self.data_version_filename))\n exit(1)\n\n print(\"Current data_version is \" + version)\n\n try:\n return int(version)\n except ValueError:\n print(\"ABORTING: data_version {d} is not an Integer\".format(d=version))\n exit(1)\n\n def get_new_version_number(self):\n latest_version = None\n last_tag = self.git.describe('--tags', abbrev=0)\n\n version = re.search('.*(\\d+\\.\\d+\\.\\d+).*', last_tag)\n if version:\n latest_version = version.group(1)\n\n if not latest_version:\n print(\"no latest version found\")\n exit(1)\n\n version_n = latest_version.split('.')\n print(\"latest version is {}\".format(version_n))\n\n self.version = [int(i) for i in version_n]\n\n self.latest_tag = get_tag_name(self.version)\n print(\"last tag is \" + self.latest_tag)\n\n if self.release_type == \"regular\":\n if self.version[0] > self.dev_data_version:\n print(\n \"ABORTING: data_version {d} is < to latest tag {t}\".format(\n d=self.dev_data_version, t=self.latest_tag\n )\n )\n exit(1)\n elif self.version[0] < self.dev_data_version: # major version\n self.version[0] = self.dev_data_version\n self.version[1] = self.version[2] = 0\n else: # versions equal: minor version\n self.version[0] = self.dev_data_version\n self.version[1] += 1\n self.version[2] = 0\n\n elif self.release_type == \"major\":\n self.version[0] += 1\n self.version[1] = self.version[2] = 0\n\n elif self.release_type == \"minor\":\n self.version[1] += 1\n self.version[2] = 0\n\n elif self.release_type == \"hotfix\":\n self.version[2] += 1\n\n else:\n exit(5)\n\n if self.version[0] > self.dev_data_version:\n print(\n \"ABORTING: data_version {d} is < to tag {t} to be published\".format(\n d=self.dev_data_version, t=self.latest_tag\n )\n )\n exit(1)\n\n self.str_version = \"{maj}.{min}.{hf}\".format(\n maj=self.version[0], min=self.version[1], hf=self.version[2]\n )\n\n print(\"New version is {}\".format(self.str_version))\n return self.str_version\n\n def checkout_parent_branch(self):\n parent = \"\"\n if self.release_type == \"hotfix\":\n parent = \"release\"\n else:\n parent = \"dev\"\n\n self.git.checkout(parent)\n self.git.submodule('update', '--recursive')\n\n print(\"current branch {}\".format(self.repo.active_branch))\n\n def closed_pr_generator(self):\n # lazy get all closed PR ordered by last updated\n closed_pr = []\n page = 1\n while True:\n query = (\n \"https://api.github.com/repos/CanalTP/navitia/\"\n \"pulls?state=closed&base=dev&sort=updated&direction=desc&page={page}\".format(page=page)\n )\n print(\"query github api: \" + query)\n github_response = requests.get(query, auth=self.auth)\n\n if github_response.status_code != 200:\n message = github_response.json()['message']\n print(u' * Impossible to retrieve PR\\n * ' + message)\n return\n\n closed_pr = 
github_response.json()\n if not closed_pr:\n print(\"Reached end of PR list\")\n return\n\n for pr in closed_pr:\n yield pr\n\n page += 1\n\n def get_merged_pullrequest(self):\n lines = []\n nb_successive_merged_pr = 0\n for pr in self.closed_pr_generator():\n title = pr['title']\n url = pr['html_url']\n pr_head_sha = pr['head']['sha']\n # test if PR was merged (not simply closed)\n # and if distant/release contains HEAD of PR\n # (stops after 10 successive merged PR)\n if pr['merged_at']:\n branches = []\n try:\n branches = self.git.branch('-r', '--contains', pr_head_sha, '--no-color') + '\\n'\n except:\n print(\n \"ERROR while searching for commit in release branch: \"\n \"Following PR added to changelog, remove it if needed.\\n\"\n )\n\n # adding separators before and after to match only branch name\n release_branch_name = ' ' + self.remote_name + '/release\\n'\n if release_branch_name in branches:\n nb_successive_merged_pr += 1\n if nb_successive_merged_pr >= 10:\n break\n else:\n # doing the label search as late as possible to save api calls\n has_excluded_label = False\n label_query = pr['_links']['issue']['href'] + '/labels'\n labels = requests.get(label_query, auth=self.auth).json()\n if any(label['name'] in (\"hotfix\", \"not_in_changelog\") for label in labels):\n has_excluded_label = True\n\n if not has_excluded_label:\n lines.append(u' * {title} <{url}>\\n'.format(title=title, url=url))\n print(lines[-1])\n nb_successive_merged_pr = 0\n return lines\n\n def create_changelog(self):\n write_lines = [u'navitia2 (%s) unstable; urgency=low\\n' % self.str_version, u'\\n']\n\n if self.release_type != \"hotfix\":\n pullrequests = self.get_merged_pullrequest()\n write_lines.extend(pullrequests)\n else:\n write_lines.append(u' * \\n')\n\n author_name = self.git.config('user.name')\n author_mail = self.git.config('user.email')\n write_lines.extend(\n [\n u'\\n',\n u' -- {name} <{mail}> {now} +0100\\n'.format(\n name=author_name, mail=author_mail, now=datetime.now().strftime(\"%a, %d %b %Y %H:%m:%S\")\n ),\n u'\\n',\n ]\n )\n\n return write_lines\n\n def update_changelog(self):\n print(\"updating changelog\")\n changelog = self.create_changelog()\n\n f_changelog = None\n back_filename = self.changelog_filename + \"~\"\n try:\n f_changelog = codecs.open(self.changelog_filename, 'r', 'utf-8')\n except IOError:\n print(\"Unable to open file: \" + self.changelog_filename)\n exit(1)\n f_changelogback = codecs.open(back_filename, \"w\", \"utf-8\")\n\n for line in changelog:\n f_changelogback.write(line)\n\n for line in f_changelog:\n f_changelogback.write(line)\n f_changelog.close()\n f_changelogback.close()\n last_modified = stat(back_filename)\n (stdout, stderr) = subprocess.Popen(\n [\"vim\", back_filename, \"--nofork\"], stderr=subprocess.PIPE\n ).communicate()\n after = stat(back_filename)\n if last_modified == after:\n print(\"No changes made, we stop\")\n remove(back_filename)\n exit(2)\n\n copyfile(back_filename, self.changelog_filename)\n self.git.add(os.path.abspath(self.changelog_filename))\n\n def get_modified_changelog(self):\n # the changelog might have been modified by the user, so we have to read it again\n f_changelog = codecs.open(self.changelog_filename, 'r', 'utf-8')\n\n lines = []\n nb_version = 0\n for line in f_changelog:\n # each version are separated by a line like\n # navitia2 (0.94.1) unstable; urgency=low\n if line.startswith(\"navitia2 \"):\n nb_version += 1\n continue\n if nb_version >= 2:\n break # we can stop\n if nb_version == 0:\n continue\n\n lines.append(line 
+ u'\\n')\n\n f_changelog.close()\n return lines\n\n def publish_release(self, temp_branch):\n self.git.checkout(\"release\")\n self.git.submodule('update', '--recursive')\n # merge with the release branch\n self.git.merge(temp_branch, \"release\", '--no-ff')\n\n print(\"current branch {}\".format(self.repo.active_branch))\n # we tag the release\n tag_message = u'Version {}\\n'.format(self.str_version)\n\n changelog = self.get_modified_changelog()\n for change in changelog:\n tag_message += change\n\n print(\"tag: \" + tag_message)\n\n self.repo.create_tag(get_tag_name(self.version), message=tag_message)\n\n # and we merge back the release branch to dev (at least for the tag in release)\n self.git.merge(\"release\", \"dev\", '--no-ff')\n\n print(\"publishing the release\")\n\n print(\"Check the release, you will probably want to merge release in dev:\")\n print(\" git checkout dev; git submodule update --recursive\")\n print(\" git merge release\")\n print(\"And when you're happy do:\")\n print(\" git push {} release dev --tags\".format(self.remote_name))\n # TODO: when we'll be confident, we will do that automaticaly\n\n def release_the_kraken(self, new_version):\n\n tmp_name = \"release_%s\" % new_version\n\n self.checkout_parent_branch()\n\n # we then create a new temporary branch\n print(\"creating temporary release branch {}\".format(tmp_name))\n self.git.checkout(b=tmp_name)\n print(\"current branch {}\".format(self.repo.active_branch))\n\n self.update_changelog()\n\n self.git.commit(m=\"Version %s\" % self.str_version)\n\n if self.release_type == \"hotfix\":\n print(\"now time to do your actual hotfix! (cherry-pick commits)\")\n print(\"PLEASE check that \\\"release\\\" COMPILES and TESTS!\")\n print(\"Note: you'll have to merge/tag/push manually after your fix:\")\n print(\" git checkout release\")\n print(\" git merge --no-ff {tmp_branch}\".format(tmp_branch=tmp_name))\n print(\n \" git tag -a {} #then add message on Version and mention concerned PRs\".format(\n get_tag_name(self.version)\n )\n )\n print(\" git checkout dev\")\n print(\" git merge --ff release\")\n print(\" git push {} release dev --tags\".format(self.remote_name))\n\n # TODO2 try to script that (put 2 hotfix param, like hotfix init and hotfix publish ?)\n exit(0)\n\n self.publish_release(tmp_name)\n\n\ndef get_release_type():\n if raw_input(\"Do you need a binarization ? [Y/n] \").lower() == \"y\":\n return \"major\"\n\n if raw_input(\"Have you changed the API or Data interface ? [Y/n] \").lower() == \"y\":\n return \"major\"\n\n if raw_input(\"Are the changes backward compatible ? [Y/n] \").lower() == \"y\":\n return \"minor\"\n\n if raw_input(\"Are you hotfixing ? [Y/n] \").lower() == \"y\":\n return \"hotfix\"\n\n raise RuntimeError(\"Couldn't find out the release type\")\n\n\nif __name__ == '__main__':\n\n if len(argv) < 1:\n print(\"mandatory argument: {regular|major|minor|hotfix}\")\n print(\"possible additional argument: remote (default is CanalTP)\")\n exit(5)\n\n logging.basicConfig(level=logging.INFO)\n\n release_type = get_release_type()\n remote = argv[1] if len(argv) >= 2 else \"CanalTP\"\n\n manager = ReleaseManager(release_type, remote_name=remote)\n\n new_version = manager.get_new_version_number()\n\n print(\"Release type: {}\".format(release_type))\n print(\"Release version: {}\".format(new_version))\n if raw_input(\"Shall we proceed ? 
[Y/n] \").lower() != \"y\":\n exit(6)\n\n manager.release_the_kraken(new_version)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":2712,"cells":{"repo_name":{"kind":"string","value":"levilucio/SyVOLT"},"path":{"kind":"string","value":"GM2AUTOSAR_MM/graph_MT_post__directLink_T.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3614"},"content":{"kind":"string","value":"\"\"\"\n__graph_MT_post__directLink_T.py___________________________________________________________\n\nAutomatically generated LINK for entity MT_post__directLink_T\nDO NOT MODIFY DIRECTLY\n___________________________________________________________________________________________\n\"\"\"\nfrom graphLink import *\nfrom stickylink import *\nfrom widthXfillXdecoration import *\nclass graph_MT_post__directLink_T(graphLink):\n\n def __init__(self, xc, yc, semObject = None ):\n self.semObject = semObject\n self.semanticObject = semObject\n from linkEditor import *\n self.le=linkEditor(self,self.semObject, \"directLink_T\")\n self.le.FirstLink= stickylink()\n self.le.FirstLink.arrow=ATOM3Boolean()\n self.le.FirstLink.arrow.setValue((' ', 0))\n self.le.FirstLink.arrow.config = 0\n self.le.FirstLink.arrowShape1=ATOM3Integer(8)\n self.le.FirstLink.arrowShape2=ATOM3Integer(10)\n self.le.FirstLink.arrowShape3=ATOM3Integer(3)\n self.le.FirstLink.decoration=ATOM3Appearance()\n self.le.FirstLink.decoration.setValue( ('directLink_T_1stLink', self.le.FirstLink))\n self.le.FirstSegment= widthXfillXdecoration()\n self.le.FirstSegment.width=ATOM3Integer(2)\n self.le.FirstSegment.fill=ATOM3String('black', 20)\n self.le.FirstSegment.stipple=ATOM3String('', 20)\n self.le.FirstSegment.arrow=ATOM3Boolean()\n self.le.FirstSegment.arrow.setValue((' ', 0))\n self.le.FirstSegment.arrow.config = 0\n self.le.FirstSegment.arrowShape1=ATOM3Integer(8)\n self.le.FirstSegment.arrowShape2=ATOM3Integer(10)\n self.le.FirstSegment.arrowShape3=ATOM3Integer(3)\n self.le.FirstSegment.decoration=ATOM3Appearance()\n self.le.FirstSegment.decoration.setValue( ('directLink_T_1stSegment', self.le.FirstSegment))\n self.le.FirstSegment.decoration_Position=ATOM3Enum(['Up', 'Down', 'Middle', 'No decoration'],3,0)\n self.le.Center=ATOM3Appearance()\n self.le.Center.setValue( ('directLink_T_Center', self.le))\n self.le.SecondSegment= widthXfillXdecoration()\n self.le.SecondSegment.width=ATOM3Integer(2)\n self.le.SecondSegment.fill=ATOM3String('black', 20)\n self.le.SecondSegment.stipple=ATOM3String('', 20)\n self.le.SecondSegment.arrow=ATOM3Boolean()\n self.le.SecondSegment.arrow.setValue((' ', 0))\n self.le.SecondSegment.arrow.config = 0\n self.le.SecondSegment.arrowShape1=ATOM3Integer(8)\n self.le.SecondSegment.arrowShape2=ATOM3Integer(10)\n self.le.SecondSegment.arrowShape3=ATOM3Integer(3)\n self.le.SecondSegment.decoration=ATOM3Appearance()\n self.le.SecondSegment.decoration.setValue( ('directLink_T_2ndSegment', self.le.SecondSegment))\n self.le.SecondSegment.decoration_Position=ATOM3Enum(['Up', 'Down', 'Middle', 'No decoration'],3,0)\n self.le.SecondLink= stickylink()\n self.le.SecondLink.arrow=ATOM3Boolean()\n self.le.SecondLink.arrow.setValue((' ', 1))\n self.le.SecondLink.arrow.config = 0\n self.le.SecondLink.arrowShape1=ATOM3Integer(8)\n self.le.SecondLink.arrowShape2=ATOM3Integer(10)\n self.le.SecondLink.arrowShape3=ATOM3Integer(3)\n self.le.SecondLink.decoration=ATOM3Appearance()\n self.le.SecondLink.decoration.setValue( ('directLink_T_2ndLink', self.le.SecondLink))\n self.le.FirstLink.decoration.semObject=self.semObject\n 
self.le.FirstSegment.decoration.semObject=self.semObject\n        self.le.Center.semObject=self.semObject\n        self.le.SecondSegment.decoration.semObject=self.semObject\n        self.le.SecondLink.decoration.semObject=self.semObject\n        graphLink.__init__(self, xc, yc, self.le,semObject)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2713,"cells":{"repo_name":{"kind":"string","value":"pycrystem/pycrystem"},"path":{"kind":"string","value":"pyxem/signals/tensor_field.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3246"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright 2016-2020 The pyXem developers\n#\n# This file is part of pyXem.\n#\n# pyXem is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pyXem is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pyXem. If not, see <http://www.gnu.org/licenses/>.\n\nfrom hyperspy.signals import Signal2D\nimport numpy as np\nfrom scipy.linalg import polar\nfrom hyperspy.utils import stack\nimport math\nfrom pyxem.signals.strain_map import StrainMap\n\n\"\"\"\nSignal class for Tensor Fields\n\"\"\"\n\n\ndef _polar_decomposition(image, side):\n    \"\"\"Perform a polar decomposition of a second rank tensor.\n\n    Parameters\n    ----------\n    image : np.array()\n        Matrix on which to form polar decomposition.\n    side : str\n        'left' or 'right' the side on which to perform polar decomposition.\n\n    Returns\n    -------\n    R, U : np.array()\n        Rotation and stretch matrices obtained by polar decomposition\n        (the rotation comes first for side='right').\n\n    \"\"\"\n    return np.array(polar(image, side=side))\n\n\ndef _get_rotation_angle(matrix):\n    \"\"\"Find the rotation angle associated with a given rotation matrix.\n\n    Parameters\n    ----------\n    matrix : np.array()\n        A rotation matrix.\n\n    Returns\n    -------\n    angle : np.array()\n        Rotation angle associated with matrix.\n\n    \"\"\"\n    return np.array(-math.asin(matrix[1, 0]))\n\n\nclass DisplacementGradientMap(Signal2D):\n    _signal_type = \"tensor_field\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        # Check that the signal dimensions are (3,3) for it to be a valid\n        # TensorField\n\n    def polar_decomposition(self):\n        \"\"\"Perform polar decomposition on the second rank tensors describing\n        the TensorField. 
The polar decomposition is right handed and given by\n :math:`D = RU`\n\n Returns\n -------\n R : TensorField\n The orthogonal matrix describing the rotation field.\n\n U : TensorField\n The strain tensor field.\n\n \"\"\"\n RU = self.map(_polar_decomposition, side=\"right\", inplace=False)\n return RU.isig[:, :, 0], RU.isig[:, :, 1]\n\n def get_strain_maps(self):\n \"\"\"Obtain strain maps from the displacement gradient tensor at each\n navigation position in the small strain approximation.\n\n Returns\n -------\n\n strain_results : BaseSignal\n Signal of shape < 4 | , > , navigation order is e11,e22,e12,theta\n \"\"\"\n R, U = self.polar_decomposition()\n\n e11 = -U.isig[0, 0].T + 1\n e12 = U.isig[0, 1].T\n e21 = U.isig[1, 0].T\n e22 = -U.isig[1, 1].T + 1\n theta = R.map(_get_rotation_angle, inplace=False)\n theta.axes_manager.set_signal_dimension(2)\n\n strain_results = stack([e11, e22, e12, theta])\n\n return StrainMap(strain_results)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2714,"cells":{"repo_name":{"kind":"string","value":"srm912/servo"},"path":{"kind":"string","value":"tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py"},"copies":{"kind":"string","value":"125"},"size":{"kind":"string","value":"5358"},"content":{"kind":"string","value":"import sys\nimport os\nimport hashlib\nimport urllib\nimport itertools\nimport re\nimport json\nimport glob\nimport shutil\n\ntry:\n import genshi\n from genshi.template import MarkupTemplate\n\n from html5lib.tests import support\nexcept ImportError:\n print \"\"\"This script requires the Genshi templating library and html5lib source\n\nIt is recommended that these are installed in a virtualenv:\n\nvirtualenv venv\nsource venv/bin/activate\npip install genshi\ncd venv\ngit clone git@github.com:html5lib/html5lib-python.git html5lib\ncd html5lib\ngit submodule init\ngit submodule update\npip install -e ./\n\nThen run this script again, with the virtual environment still active.\nWhen you are done, type \"deactivate\" to deactivate the virtual environment.\n\"\"\"\n\nTESTS_PATH = \"html/syntax/parsing/\"\n\ndef get_paths():\n script_path = os.path.split(os.path.abspath(__file__))[0]\n repo_base = get_repo_base(script_path)\n tests_path = os.path.join(repo_base, TESTS_PATH)\n return script_path, tests_path\n\ndef get_repo_base(path):\n while path:\n if os.path.exists(os.path.join(path, \".git\")):\n return path\n else:\n path = os.path.split(path)[0]\n\ndef get_expected(data):\n data = \"#document\\n\" + data\n return data\n\ndef get_hash(data, container=None):\n if container == None:\n container = \"\"\n return hashlib.sha1(\"#container%s#data%s\"%(container.encode(\"utf8\"),\n data.encode(\"utf8\"))).hexdigest()\n\ndef make_tests(script_dir, out_dir, input_file_name, test_data):\n tests = []\n innerHTML_tests = []\n ids_seen = {}\n print input_file_name\n for test in test_data:\n if \"script-off\" in test:\n continue\n is_innerHTML = \"document-fragment\" in test\n data = test[\"data\"]\n container = test[\"document-fragment\"] if is_innerHTML else None\n assert test[\"document\"], test\n expected = get_expected(test[\"document\"])\n test_list = innerHTML_tests if is_innerHTML else tests\n test_id = get_hash(data, container)\n if test_id in ids_seen:\n print \"WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping\"%(test_id, input_file_name, container, data, ids_seen[test_id])\n continue\n ids_seen[test_id] = (container, data)\n 
test_list.append({'string_uri_encoded_input':\"\\\"%s\\\"\"%urllib.quote(data.encode(\"utf8\")),\n 'input':data,\n 'expected':expected,\n 'string_escaped_expected':json.dumps(urllib.quote(expected.encode(\"utf8\"))),\n 'id':test_id,\n 'container':container\n })\n path_normal = None\n if tests:\n path_normal = write_test_file(script_dir, out_dir,\n tests, \"html5lib_%s\"%input_file_name,\n \"html5lib_test.xml\")\n path_innerHTML = None\n if innerHTML_tests:\n path_innerHTML = write_test_file(script_dir, out_dir,\n innerHTML_tests, \"html5lib_innerHTML_%s\"%input_file_name,\n \"html5lib_test_fragment.xml\")\n\n return path_normal, path_innerHTML\n\ndef write_test_file(script_dir, out_dir, tests, file_name, template_file_name):\n file_name = os.path.join(out_dir, file_name + \".html\")\n short_name = os.path.split(file_name)[1]\n\n with open(os.path.join(script_dir, template_file_name)) as f:\n template = MarkupTemplate(f)\n\n stream = template.generate(file_name=short_name, tests=tests)\n\n with open(file_name, \"w\") as f:\n f.write(stream.render('html', doctype='html5',\n encoding=\"utf8\"))\n return file_name\n\ndef escape_js_string(in_data):\n return in_data.encode(\"utf8\").encode(\"string-escape\")\n\ndef serialize_filenames(test_filenames):\n return \"[\" + \",\\n\".join(\"\\\"%s\\\"\"%item for item in test_filenames) + \"]\"\n\ndef main():\n\n script_dir, out_dir = get_paths()\n\n test_files = []\n inner_html_files = []\n\n if len(sys.argv) > 2:\n test_iterator = itertools.izip(\n itertools.repeat(False),\n sorted(os.path.abspath(item) for item in\n glob.glob(os.path.join(sys.argv[2], \"*.dat\"))))\n else:\n test_iterator = itertools.chain(\n itertools.izip(itertools.repeat(False),\n sorted(support.get_data_files(\"tree-construction\"))),\n itertools.izip(itertools.repeat(True),\n sorted(support.get_data_files(\n os.path.join(\"tree-construction\", \"scripted\")))))\n\n for (scripted, test_file) in test_iterator:\n input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]\n if scripted:\n input_file_name = \"scripted_\" + input_file_name\n test_data = support.TestData(test_file)\n test_filename, inner_html_file_name = make_tests(script_dir, out_dir,\n input_file_name, test_data)\n if test_filename is not None:\n test_files.append(test_filename)\n if inner_html_file_name is not None:\n inner_html_files.append(inner_html_file_name)\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":2715,"cells":{"repo_name":{"kind":"string","value":"NSAmelchev/ignite"},"path":{"kind":"string","value":"modules/platforms/python/pyignite/cache.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"22098"},"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Iterable, Optional, Union\n\nfrom .datatypes import prop_codes\nfrom .exceptions import (\n CacheCreationError, CacheError, ParameterError, SQLError,\n)\nfrom .utils import cache_id, is_wrapped, status_to_exception, unwrap_binary\nfrom .api.cache_config import (\n cache_create, cache_create_with_config,\n cache_get_or_create, cache_get_or_create_with_config,\n cache_destroy, cache_get_configuration,\n)\nfrom .api.key_value import (\n cache_get, cache_put, cache_get_all, cache_put_all, cache_replace,\n cache_clear, cache_clear_key, cache_clear_keys,\n cache_contains_key, cache_contains_keys,\n cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent,\n cache_get_and_remove, cache_get_and_replace,\n cache_remove_key, cache_remove_keys, cache_remove_all,\n cache_remove_if_equals, cache_replace_if_equals, cache_get_size,\n)\nfrom .api.sql import scan, scan_cursor_get_page, sql, sql_cursor_get_page\n\n\nPROP_CODES = set([\n getattr(prop_codes, x)\n for x in dir(prop_codes)\n if x.startswith('PROP_')\n])\nCACHE_CREATE_FUNCS = {\n True: {\n True: cache_get_or_create_with_config,\n False: cache_create_with_config,\n },\n False: {\n True: cache_get_or_create,\n False: cache_create,\n },\n}\n\n\nclass Cache:\n \"\"\"\n Ignite cache abstraction. Users should never use this class directly,\n but construct its instances with\n :py:meth:`~pyignite.client.Client.create_cache`,\n :py:meth:`~pyignite.client.Client.get_or_create_cache` or\n :py:meth:`~pyignite.client.Client.get_cache` methods instead. See\n :ref:`this example ` on how to do it.\n \"\"\"\n _cache_id = None\n _name = None\n _client = None\n _settings = None\n\n @staticmethod\n def _validate_settings(\n settings: Union[str, dict]=None, get_only: bool=False,\n ):\n if any([\n not settings,\n type(settings) not in (str, dict),\n type(settings) is dict and prop_codes.PROP_NAME not in settings,\n ]):\n raise ParameterError('You should supply at least cache name')\n\n if all([\n type(settings) is dict,\n not set(settings).issubset(PROP_CODES),\n ]):\n raise ParameterError('One or more settings was not recognized')\n\n if get_only and type(settings) is dict and len(settings) != 1:\n raise ParameterError('Only cache name allowed as a parameter')\n\n def __init__(\n self, client: 'Client', settings: Union[str, dict]=None,\n with_get: bool=False, get_only: bool=False,\n ):\n \"\"\"\n Initialize cache object.\n\n :param client: Ignite client,\n :param settings: cache settings. Can be a string (cache name) or a dict\n of cache properties and their values. In this case PROP_NAME is\n mandatory,\n :param with_get: (optional) do not raise exception, if the cache\n already exists. Defaults to False,\n :param get_only: (optional) do not communicate with Ignite server\n at all, only create Cache instance. 
Defaults to False.\n \"\"\"\n self._client = client\n self._validate_settings(settings)\n if type(settings) == str:\n self._name = settings\n else:\n self._name = settings[prop_codes.PROP_NAME]\n\n if not get_only:\n func = CACHE_CREATE_FUNCS[type(settings) is dict][with_get]\n result = func(client, settings)\n if result.status != 0:\n raise CacheCreationError(result.message)\n\n self._cache_id = cache_id(self._name)\n\n @property\n def settings(self) -> Optional[dict]:\n \"\"\"\n Lazy Cache settings. See the :ref:`example `\n of reading this property.\n\n All cache properties are documented here: :ref:`cache_props`.\n\n :return: dict of cache properties and their values.\n \"\"\"\n if self._settings is None:\n config_result = cache_get_configuration(self._client, self._cache_id)\n if config_result.status == 0:\n self._settings = config_result.value\n else:\n raise CacheError(config_result.message)\n\n return self._settings\n\n @property\n def name(self) -> str:\n \"\"\"\n Lazy cache name.\n\n :return: cache name string.\n \"\"\"\n if self._name is None:\n self._name = self.settings[prop_codes.PROP_NAME]\n\n return self._name\n\n @property\n def client(self) -> 'Client':\n \"\"\"\n Ignite :class:`~pyignite.client.Client` object.\n\n :return: Client object, through which the cache is accessed.\n \"\"\"\n return self._client\n\n @property\n def cache_id(self) -> int:\n \"\"\"\n Cache ID.\n\n :return: integer value of the cache ID.\n \"\"\"\n return self._cache_id\n\n def _process_binary(self, value: Any) -> Any:\n \"\"\"\n Detects and recursively unwraps Binary Object.\n\n :param value: anything that could be a Binary Object,\n :return: the result of the Binary Object unwrapping with all other data\n left intact.\n \"\"\"\n if is_wrapped(value):\n return unwrap_binary(self._client, value)\n return value\n\n @status_to_exception(CacheError)\n def destroy(self):\n \"\"\"\n Destroys cache with a given name.\n \"\"\"\n return cache_destroy(self._client, self._cache_id)\n\n @status_to_exception(CacheError)\n def get(self, key, key_hint: object=None) -> Any:\n \"\"\"\n Retrieves a value from cache by key.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :return: value retrieved.\n \"\"\"\n result = cache_get(self._client, self._cache_id, key, key_hint=key_hint)\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def put(self, key, value, key_hint: object=None, value_hint: object=None):\n \"\"\"\n Puts a value with a given key to cache (overwriting existing value\n if any).\n\n :param key: key for the cache entry. 
Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted.\n \"\"\"\n return cache_put(\n self._client, self._cache_id, key, value,\n key_hint=key_hint, value_hint=value_hint\n )\n\n @status_to_exception(CacheError)\n def get_all(self, keys: list) -> list:\n \"\"\"\n Retrieves multiple key-value pairs from cache.\n\n :param keys: list of keys or tuples of (key, key_hint),\n :return: a dict of key-value pairs.\n \"\"\"\n result = cache_get_all(self._client, self._cache_id, keys)\n if result.value:\n for key, value in result.value.items():\n result.value[key] = self._process_binary(value)\n return result\n\n @status_to_exception(CacheError)\n def put_all(self, pairs: dict):\n \"\"\"\n Puts multiple key-value pairs to cache (overwriting existing\n associations if any).\n\n :param pairs: dictionary type parameters, contains key-value pairs\n to save. Each key or value can be an item of representable\n Python type or a tuple of (item, hint),\n \"\"\"\n return cache_put_all(self._client, self._cache_id, pairs)\n\n @status_to_exception(CacheError)\n def replace(\n self, key, value, key_hint: object=None, value_hint: object=None\n ):\n \"\"\"\n Puts a value with a given key to cache only if the key already exist.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted.\n \"\"\"\n result = cache_replace(\n self._client, self._cache_id, key, value,\n key_hint=key_hint, value_hint=value_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def clear(self, keys: Optional[list]=None):\n \"\"\"\n Clears the cache without notifying listeners or cache writers.\n\n :param keys: (optional) list of cache keys or (key, key type\n hint) tuples to clear (default: clear all).\n \"\"\"\n if keys:\n return cache_clear_keys(self._client, self._cache_id, keys)\n else:\n return cache_clear(self._client, self._cache_id)\n\n @status_to_exception(CacheError)\n def clear_key(self, key, key_hint: object=None):\n \"\"\"\n Clears the cache key without notifying listeners or cache writers.\n\n :param key: key for the cache entry,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n \"\"\"\n return cache_clear_key(\n self._client, self._cache_id, key, key_hint=key_hint\n )\n\n @status_to_exception(CacheError)\n def contains_key(self, key, key_hint=None) -> bool:\n \"\"\"\n Returns a value indicating whether given key is present in cache.\n\n :param key: key for the cache entry. 
Can be of any supported type,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :return: boolean `True` when key is present, `False` otherwise.\n \"\"\"\n return cache_contains_key(\n self._client, self._cache_id, key, key_hint=key_hint\n )\n\n @status_to_exception(CacheError)\n def contains_keys(self, keys: Iterable) -> bool:\n \"\"\"\n Returns a value indicating whether all given keys are present in cache.\n\n :param keys: a list of keys or (key, type hint) tuples,\n :return: boolean `True` when all keys are present, `False` otherwise.\n \"\"\"\n return cache_contains_keys(self._client, self._cache_id, keys)\n\n @status_to_exception(CacheError)\n def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any:\n \"\"\"\n Puts a value with a given key to cache, and returns the previous value\n for that key, or null value if there was not such key.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted.\n :return: old value or None.\n \"\"\"\n result = cache_get_and_put(\n self._client, self._cache_id, key, value, key_hint, value_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def get_and_put_if_absent(\n self, key, value, key_hint=None, value_hint=None\n ):\n \"\"\"\n Puts a value with a given key to cache only if the key does not\n already exist.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted,\n :return: old value or None.\n \"\"\"\n result = cache_get_and_put_if_absent(\n self._client, self._cache_id, key, value, key_hint, value_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def put_if_absent(self, key, value, key_hint=None, value_hint=None):\n \"\"\"\n Puts a value with a given key to cache only if the key does not\n already exist.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted.\n \"\"\"\n return cache_put_if_absent(\n self._client, self._cache_id, key, value, key_hint, value_hint\n )\n\n @status_to_exception(CacheError)\n def get_and_remove(self, key, key_hint=None) -> Any:\n \"\"\"\n Removes the cache entry with specified key, returning the value.\n\n :param key: key for the cache entry. 
Can be of any supported type,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :return: old value or None.\n \"\"\"\n result = cache_get_and_remove(\n self._client, self._cache_id, key, key_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def get_and_replace(\n self, key, value, key_hint=None, value_hint=None\n ) -> Any:\n \"\"\"\n Puts a value with a given key to cache, returning previous value\n for that key, if and only if there is a value currently mapped\n for that key.\n\n :param key: key for the cache entry. Can be of any supported type,\n :param value: value for the key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted.\n :return: old value or None.\n \"\"\"\n result = cache_get_and_replace(\n self._client, self._cache_id, key, value, key_hint, value_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def remove_key(self, key, key_hint=None):\n \"\"\"\n Clears the cache key without notifying listeners or cache writers.\n\n :param key: key for the cache entry,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n \"\"\"\n return cache_remove_key(self._client, self._cache_id, key, key_hint)\n\n @status_to_exception(CacheError)\n def remove_keys(self, keys: list):\n \"\"\"\n Removes cache entries by given list of keys, notifying listeners\n and cache writers.\n\n :param keys: list of keys or tuples of (key, key_hint) to remove.\n \"\"\"\n return cache_remove_keys(self._client, self._cache_id, keys)\n\n @status_to_exception(CacheError)\n def remove_all(self):\n \"\"\"\n Removes all cache entries, notifying listeners and cache writers.\n \"\"\"\n return cache_remove_all(self._client, self._cache_id)\n\n @status_to_exception(CacheError)\n def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None):\n \"\"\"\n Removes an entry with a given key if provided value is equal to\n actual value, notifying listeners and cache writers.\n\n :param key: key for the cache entry,\n :param sample: a sample to compare the stored value with,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param sample_hint: (optional) Ignite data type, for which\n the given sample should be converted.\n \"\"\"\n return cache_remove_if_equals(\n self._client, self._cache_id, key, sample, key_hint, sample_hint\n )\n\n @status_to_exception(CacheError)\n def replace_if_equals(\n self, key, sample, value,\n key_hint=None, sample_hint=None, value_hint=None\n ) -> Any:\n \"\"\"\n Puts a value with a given key to cache only if the key already exists\n and value equals provided sample.\n\n :param key: key for the cache entry,\n :param sample: a sample to compare the stored value with,\n :param value: new value for the given key,\n :param key_hint: (optional) Ignite data type, for which the given key\n should be converted,\n :param sample_hint: (optional) Ignite data type, for which\n the given sample should be converted\n :param value_hint: (optional) Ignite data type, for which the given\n value should be converted,\n :return: boolean `True` when key is present, `False` otherwise.\n \"\"\"\n result = cache_replace_if_equals(\n self._client, self._cache_id, key, sample, value,\n key_hint, sample_hint, 
value_hint\n )\n result.value = self._process_binary(result.value)\n return result\n\n @status_to_exception(CacheError)\n def get_size(self, peek_modes=0):\n \"\"\"\n Gets the number of entries in cache.\n\n :param peek_modes: (optional) limit count to near cache partition\n (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache\n (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),\n :return: integer number of cache entries.\n \"\"\"\n return cache_get_size(self._client, self._cache_id, peek_modes)\n\n def scan(self, page_size: int=1, partitions: int=-1, local: bool=False):\n \"\"\"\n Returns all key-value pairs from the cache, similar to `get_all`, but\n with internal pagination, which is slower, but safer.\n\n :param page_size: (optional) page size. Default size is 1 (slowest\n and safest),\n :param partitions: (optional) number of partitions to query\n (negative to query entire cache),\n :param local: (optional) pass True if this query should be executed\n on local node only. Defaults to False,\n :return: generator with key-value pairs.\n \"\"\"\n result = scan(self._client, self._cache_id, page_size, partitions, local)\n if result.status != 0:\n raise CacheError(result.message)\n\n cursor = result.value['cursor']\n for k, v in result.value['data'].items():\n k = self._process_binary(k)\n v = self._process_binary(v)\n yield k, v\n\n while result.value['more']:\n result = scan_cursor_get_page(self._client, cursor)\n if result.status != 0:\n raise CacheError(result.message)\n\n for k, v in result.value['data'].items():\n k = self._process_binary(k)\n v = self._process_binary(v)\n yield k, v\n\n def select_row(\n self, query_str: str, page_size: int=1,\n query_args: Optional[list]=None, distributed_joins: bool=False,\n replicated_only: bool=False, local: bool=False, timeout: int=0\n ):\n \"\"\"\n Executes a simplified SQL SELECT query over data stored in the cache.\n The query returns the whole record (key and value).\n\n :param query_str: SQL query string,\n :param page_size: (optional) cursor page size. Default is 1, which\n means that client makes one server call per row,\n :param query_args: (optional) query arguments,\n :param distributed_joins: (optional) distributed joins. Defaults\n to False,\n :param replicated_only: (optional) whether query contains only\n replicated tables or not. Defaults to False,\n :param local: (optional) pass True if this query should be executed\n on local node only. Defaults to False,\n :param timeout: (optional) non-negative timeout value in ms. 
Zero\n disables timeout (default),\n :return: generator with key-value pairs.\n \"\"\"\n def generate_result(value):\n cursor = value['cursor']\n more = value['more']\n for k, v in value['data'].items():\n k = self._process_binary(k)\n v = self._process_binary(v)\n yield k, v\n\n while more:\n inner_result = sql_cursor_get_page(self._client, cursor)\n if inner_result.status != 0:\n raise SQLError(inner_result.message)\n more = inner_result.value['more']\n for k, v in inner_result.value['data'].items():\n k = self._process_binary(k)\n v = self._process_binary(v)\n yield k, v\n\n type_name = self.settings[\n prop_codes.PROP_QUERY_ENTITIES\n ][0]['value_type_name']\n if not type_name:\n raise SQLError('Value type is unknown')\n result = sql(\n self._client,\n self._cache_id,\n type_name,\n query_str,\n page_size,\n query_args,\n distributed_joins,\n replicated_only,\n local,\n timeout\n )\n if result.status != 0:\n raise SQLError(result.message)\n\n return generate_result(result.value)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2716,"cells":{"repo_name":{"kind":"string","value":"mabushadi/dpxdt"},"path":{"kind":"string","value":"dpxdt/tools/diff_my_urls.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"6027"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright 2014 Brett Slatkin\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for diffing a set of URL pairs defined in a config file.\n\nExample usage:\n\n./dpxdt/tools/diff_my_urls.py \\\n --upload_build_id=1234 \\\n --release_server_prefix=https://my-dpxdt-apiserver.example.com/api \\\n --release_client_id=<your client id> \\\n --release_client_secret=<your client secret> \\\n --upload_release_name=\"My release name\" \\\n --release_cut_url=http://example.com/path/to/my/release/tool/for/this/cut \\\n --tests_json_path=my_url_tests.json\n\nExample input file \"my_url_tests.json\". 
One entry per test:\n\n[\n {\n \"name\": \"My homepage\",\n \"run_url\": \"http://localhost:5000/static/dummy/dummy_page1.html\",\n \"run_config\": {\n \"viewportSize\": {\n \"width\": 1024,\n \"height\": 768\n },\n \"injectCss\": \"#foobar { background-color: lime; }\",\n \"injectJs\": \"document.getElementById('foobar').innerText = 'bar';\",\n },\n \"ref_url\": \"http://localhost:5000/static/dummy/dummy_page1.html\",\n \"ref_config\": {\n \"viewportSize\": {\n \"width\": 1024,\n \"height\": 768\n },\n \"injectCss\": \"#foobar { background-color: goldenrod; }\",\n \"injectJs\": \"document.getElementById('foobar').innerText = 'foo';\",\n }\n },\n ...\n]\n\nSee README.md for documentation of config parameters.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport sys\n\n# Local Libraries\nimport gflags\nFLAGS = gflags.FLAGS\n\n# Local modules\nfrom dpxdt.client import fetch_worker\nfrom dpxdt.client import release_worker\nfrom dpxdt.client import workers\nimport flags\n\n\nclass Test(object):\n \"\"\"Represents the JSON of a single test.\"\"\"\n\n def __init__(self, name=None, run_url=None, run_config=None,\n ref_url=None, ref_config=None):\n self.name = name\n self.run_url = run_url\n self.run_config_data = json.dumps(run_config) if run_config else None\n self.ref_url = ref_url\n self.ref_config_data = json.dumps(ref_config) if ref_config else None\n\n\ndef load_tests(data):\n \"\"\"Loads JSON data and returns a list of Test objects it contains.\"\"\"\n test_list = json.loads(data)\n results = []\n for test_json in test_list:\n results.append(Test(**test_json))\n return results\n\n\nclass DiffMyUrls(workers.WorkflowItem):\n \"\"\"Workflow for diffing a set of URL pairs defined in a config file.\n\n Args:\n release_url: URL of the newest and best version of the page.\n tests: List of Test objects to test.\n upload_build_id: Optional. Build ID of the site being compared. When\n supplied a new release will be cut for this build comparing it\n to the last good release.\n upload_release_name: Optional. Release name to use for the build. 
When\n not supplied, a new release based on the current time will be\n created.\n heartbeat: Function to call with progress status.\n \"\"\"\n\n def run(self,\n release_url,\n tests,\n upload_build_id,\n upload_release_name,\n heartbeat=None):\n if not upload_release_name:\n upload_release_name = str(datetime.datetime.utcnow())\n\n yield heartbeat('Creating release %s' % upload_release_name)\n release_number = yield release_worker.CreateReleaseWorkflow(\n upload_build_id, upload_release_name, release_url)\n\n pending_uploads = []\n for test in tests:\n item = release_worker.RequestRunWorkflow(\n upload_build_id, upload_release_name, release_number,\n test.name, url=test.run_url, config_data=test.run_config_data,\n ref_url=test.ref_url, ref_config_data=test.ref_config_data)\n pending_uploads.append(item)\n\n yield heartbeat('Requesting %d runs' % len(pending_uploads))\n yield pending_uploads\n\n yield heartbeat('Marking runs as complete')\n release_url = yield release_worker.RunsDoneWorkflow(\n upload_build_id, upload_release_name, release_number)\n\n yield heartbeat('Results viewable at: %s' % release_url)\n\n\ndef real_main(release_url=None,\n tests_json_path=None,\n upload_build_id=None,\n upload_release_name=None):\n \"\"\"Runs diff_my_urls.\"\"\"\n coordinator = workers.get_coordinator()\n fetch_worker.register(coordinator)\n coordinator.start()\n\n data = open(FLAGS.tests_json_path).read()\n tests = load_tests(data)\n\n item = DiffMyUrls(\n release_url,\n tests,\n upload_build_id,\n upload_release_name,\n heartbeat=workers.PrintWorkflow)\n item.root = True\n\n coordinator.input_queue.put(item)\n coordinator.wait_one()\n coordinator.stop()\n coordinator.join()\n\n\ndef main(argv):\n try:\n argv = FLAGS(argv)\n except gflags.FlagsError, e:\n print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)\n sys.exit(1)\n\n assert FLAGS.release_cut_url\n assert FLAGS.release_server_prefix\n assert FLAGS.tests_json_path\n assert FLAGS.upload_build_id\n\n if FLAGS.verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n\n real_main(\n release_url=FLAGS.release_cut_url,\n tests_json_path=FLAGS.tests_json_path,\n upload_build_id=FLAGS.upload_build_id,\n upload_release_name=FLAGS.upload_release_name)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2717,"cells":{"repo_name":{"kind":"string","value":"Osmose/kitsune"},"path":{"kind":"string","value":"kitsune/sumo/tests/test_utils.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"7012"},"content":{"kind":"string","value":"# -*- coding: utf8 -*-\nimport json\n\nfrom django.contrib.auth.models import Permission\nfrom django.test.client import RequestFactory\n\nfrom mock import patch, Mock\nfrom nose.tools import eq_\n\nfrom kitsune.journal.models import Record\nfrom kitsune.sumo.utils import (\n chunked, get_next_url, is_ratelimited, smart_int, truncated_json_dumps, get_browser)\nfrom kitsune.sumo.tests import TestCase\nfrom kitsune.users.tests import profile\n\n\nclass SmartIntTestCase(TestCase):\n def test_sanity(self):\n eq_(10, smart_int('10'))\n eq_(10, smart_int('10.5'))\n\n def test_int(self):\n eq_(10, smart_int(10))\n\n def test_invalid_string(self):\n eq_(0, smart_int('invalid'))\n\n def test_empty_string(self):\n eq_(0, smart_int(''))\n\n def test_wrong_type(self):\n eq_(0, smart_int(None))\n eq_(10, smart_int([], 10))\n\n def test_large_values(self):\n \"\"\"Makes sure ints that would cause an overflow result in fallback.\"\"\"\n eq_(0, smart_int('1' 
* 1000))\n\n\nclass GetNextUrlTests(TestCase):\n def setUp(self):\n super(GetNextUrlTests, self).setUp()\n self.r = RequestFactory()\n self.patcher = patch('django.contrib.sites.models.Site.objects')\n mock = self.patcher.start()\n mock.get_current.return_value.domain = 'su.mo.com'\n\n def tearDown(self):\n self.patcher.stop()\n super(GetNextUrlTests, self).tearDown()\n\n def test_query_string(self):\n \"\"\"Query-strings remain intact.\"\"\"\n r = self.r.get('/', {'next': '/new?f=b'})\n eq_('/new?f=b', get_next_url(r))\n\n def test_good_host_https(self):\n \"\"\"Full URLs work with valid hosts.\"\"\"\n r = self.r.post('/users/login',\n {'next': 'https://su.mo.com/kb/new'})\n eq_('https://su.mo.com/kb/new', get_next_url(r))\n\n def test_post(self):\n \"\"\"'next' in POST overrides GET.\"\"\"\n r = self.r.post('/?next=/foo', {'next': '/bar'})\n eq_('/bar', get_next_url(r))\n\n def test_get(self):\n \"\"\"'next' can be a query-string parameter.\"\"\"\n r = self.r.get('/users/login', {'next': '/kb/new'})\n eq_('/kb/new', get_next_url(r))\n\n def test_referer(self):\n \"\"\"Use HTTP referer if nothing else.\"\"\"\n r = self.r.get('/')\n r.META['HTTP_REFERER'] = 'http://su.mo.com/new'\n eq_('http://su.mo.com/new', get_next_url(r))\n\n def test_bad_host_https(self):\n r = self.r.get('/', {'next': 'https://example.com'})\n eq_(None, get_next_url(r))\n\n def test_bad_host_protocol_relative(self):\n \"\"\"Protocol-relative URLs do not let bad hosts through.\"\"\"\n r = self.r.get('/', {'next': '//example.com'})\n eq_(None, get_next_url(r))\n\n\nclass JSONTests(TestCase):\n def test_truncated_noop(self):\n \"\"\"Make sure short enough things are unmodified.\"\"\"\n d = {'foo': 'bar'}\n trunc = truncated_json_dumps(d, 1000, 'foo')\n eq_(json.dumps(d), trunc)\n\n def test_truncated_key(self):\n \"\"\"Make sure truncation works as expected.\"\"\"\n d = {'foo': 'a long string that should be truncated'}\n trunc = truncated_json_dumps(d, 30, 'foo')\n obj = json.loads(trunc)\n eq_(obj['foo'], 'a long string that ')\n eq_(len(trunc), 30)\n\n def test_unicode(self):\n \"\"\"Unicode should not be treated as longer than it is.\"\"\"\n d = {'formula': u'A=πr²'}\n trunc = truncated_json_dumps(d, 25, 'formula')\n eq_(json.dumps(d, ensure_ascii=False), trunc)\n\n\nclass ChunkedTests(TestCase):\n def test_chunked(self):\n # chunking nothing yields nothing.\n eq_(list(chunked([], 1)), [])\n\n # chunking list where len(list) < n\n eq_(list(chunked([1], 10)), [[1]])\n\n # chunking a list where len(list) == n\n eq_(list(chunked([1, 2], 2)), [[1, 2]])\n\n # chunking list where len(list) > n\n eq_(list(chunked([1, 2, 3, 4, 5], 2)),\n [[1, 2], [3, 4], [5]])\n\n # passing in a length overrides the real len(list)\n eq_(list(chunked([1, 2, 3, 4, 5, 6, 7], 2, length=4)),\n [[1, 2], [3, 4]])\n\n\nclass IsRatelimitedTest(TestCase):\n\n def test_ratelimited(self):\n u = profile().user\n request = Mock()\n request.user = u\n request.limited = False\n request.method = 'POST'\n\n # One call to the rate limit won't trigger it.\n eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)\n # But two will\n eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), True)\n\n def test_ratelimit_bypass(self):\n u = profile().user\n bypass = Permission.objects.get(codename='bypass_ratelimit')\n u.user_permissions.add(bypass)\n request = Mock()\n request.user = u\n request.limited = False\n request.method = 'POST'\n\n # One call to the rate limit won't trigger it.\n eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)\n # 
And a second one still won't, because the user has the bypass permission.\n eq_(is_ratelimited(request, 'test-ratelimited', '1/min'), False)\n\n def test_ratelimit_logging(self):\n u = profile().user\n request = Mock()\n request.user = u\n request.limited = False\n request.method = 'POST'\n\n eq_(Record.objects.count(), 0)\n\n # Two calls will trigger the ratelimit once.\n is_ratelimited(request, 'test-ratelimited', '1/min')\n is_ratelimited(request, 'test-ratelimited', '1/min')\n\n eq_(Record.objects.count(), 1)\n\n\nclass GetBrowserNameTest(TestCase):\n\n def test_firefox(self):\n \"\"\"Test with User Agent of Firefox\"\"\"\n\n user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'\n # Check Firefox is returning\n eq_(get_browser(user_agent), 'Firefox')\n\n def test_chrome(self):\n \"\"\"Test with User Agent of Chrome\"\"\"\n\n user_agent = ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/41.0.2228.0 Safari/537.36')\n # Check Chrome is returning\n eq_(get_browser(user_agent), 'Chrome')\n\n def test_internet_explorer(self):\n \"\"\"Test with User Agent of Internet Explorer\"\"\"\n\n # Check with default User Agent of IE 11\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko'\n eq_(get_browser(user_agent), 'Trident')\n # Check with Compatibility View situation user Agent of IE11\n user_agent = ('Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; '\n 'Trident/7.0; rv:11.0) like Gecko')\n eq_(get_browser(user_agent), 'MSIE')\n\n def test_safari(self):\n \"\"\"Test with User Agent of Safari\"\"\"\n\n user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14'\n '(KHTML, like Gecko) Version/7.0.3 Safari/7046A194A')\n # Check Safari is returning\n eq_(get_browser(user_agent), 'Safari')\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2718,"cells":{"repo_name":{"kind":"string","value":"rpm-software-management/librepo"},"path":{"kind":"string","value":"examples/python/download_packages_with_fastestmirror.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1645"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\n\"\"\"\nlibrepo - download packages\n\"\"\"\n\nimport os\nimport os.path\nimport time\nimport librepo\n\nCACHE = \"fastestmirror.cache\"\n\nLIBREPOPKG = \"librepo-1.2.1-2.fc20.x86_64.rpm\"\nLAMEPKG = \"lame-3.99.5-2.fc19.x86_64.rpm\"\n\nif __name__ == \"__main__\":\n\n # Setup logging\n def debug_function(msg, _):\n print(msg)\n #librepo.set_debug_log_handler(debug_function)\n\n # Remove packages if already exists\n def remove_pkg(filename):\n if os.path.exists(filename):\n os.remove(filename)\n remove_pkg(LIBREPOPKG)\n remove_pkg(LAMEPKG)\n\n # Prepare list of targets\n packages = []\n\n # Prepare first target\n h1 = librepo.Handle()\n h1.metalinkurl = \"https://mirrors.fedoraproject.org/metalink?repo=fedora-20&arch=x86_64\"\n h1.repotype = librepo.YUMREPO\n h1.fastestmirror = True\n h1.fastestmirrorcache = CACHE\n target = librepo.PackageTarget(\"Packages/l/\"+LIBREPOPKG, handle=h1)\n packages.append(target)\n\n # Prepare second target\n h2 = librepo.Handle()\n h2.mirrorlisturl = \"http://mirrors.rpmfusion.org/mirrorlist?repo=free-fedora-19&arch=x86_64\"\n h2.repotype = librepo.YUMREPO\n h2.fastestmirror = True\n h2.fastestmirrorcache = CACHE\n target = librepo.PackageTarget(LAMEPKG, handle=h2)\n packages.append(target)\n\n t = time.time()\n librepo.download_packages(packages)\n print(\"Download duration: 
{0}s\\n\".format((time.time() - t)))\n\n for target in packages:\n print(\"### %s: %s\" % (target.local_path, target.err or \"OK\"))\n print(\"Local path: \", target.local_path)\n print(\"Error: \", target.err)\n print()\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":2719,"cells":{"repo_name":{"kind":"string","value":"mizuy/mizwiki"},"path":{"kind":"string","value":"mizwiki/utils/conv_pukiwiki.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4421"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport codecs, os, cStringIO as StringIO, re, sys\n\nclass IStreamBuffer:\n @staticmethod\n def _conv(v):\n return v.rstrip(u'\\n\\r')\n \n def __init__(self,inputfile):\n self.input = codecs.getreader('utf-8')(inputfile)\n self.stack = []\n\n def __iter__(self):\n return self\n\n def next(self):\n if len(self.stack)>0:\n return self._conv(self.stack.pop())\n return self._conv(self.input.next())\n\n def push(self,line):\n self.stack.append(self._conv(line))\n\n def eof(self):\n if len(self.stack)==0:\n try:\n self.push(self.input.next())\n except StopIteration:\n return True\n return False\n \n def top(self):\n assert not self.eof()\n if len(self.stack)==0:\n self.push(self.input.next())\n return self.stack[-1]\n\n\ndef conv(inputs,os):\n os = codecs.getwriter('utf-8')(os)\n istr = IStreamBuffer(inputs)\n \n for l in istr:\n l = l.rstrip('~')\n assert type(l)==unicode\n\n if l.startswith('{{{'):\n os.write(l+'\\n')\n for ll in istr:\n os.write(ll+'\\n')\n if ll.startswith('}}}'):\n break\n continue\n\n if l.startswith(' '):\n istr.push(l)\n parse_quote(istr,os)\n continue\n\n if l.strip().startswith('----') and l.replace('-',' ').strip()=='':\n os.write('====\\n')\n continue\n\n parse_inline(os,l)\n os.write('\\n')\n\ndef parse_quote(istr,os):\n os.write('{{{\\n')\n for l in istr:\n if l.startswith(' '):\n os.write(l[1:]+'\\n')\n else:\n break\n os.write('}}}\\n')\n\nwikilabel = re.compile(ur'\\[\\[([^\\]]+)>([\\w_/\\.\\-]+)\\]\\]',re.U)\nnamelabel = re.compile(ur'\\[\\[([^\\]]+)>#([_a-zA-Z0-9]+)\\]\\]',re.U)\nareaedit = re.compile(ur'&areaedit\\([^\\)]*\\){([^}]*)};', re.U)\nnew = re.compile(ur'&new{([^}]*)};', re.U)\npre = re.compile(ur\"\\[|&\",re.U)\n\ndef parse_inline(doc, src):\n assert type(src)==unicode\n pos = 0\n while pos avail:\n self.cleanup()\n # TODO(atomic77) Though we can fully recover from this error\n # BackupRunner will leave the trove instance in a BACKUP state\n raise OSError(_(\"Need more free space to run mongodump, \"\n \"estimated %(est_dump_size)s\"\n \" and found %(avail)s bytes free \") %\n {'est_dump_size': est_dump_size,\n 'avail': avail})\n\n operating_system.create_directory(MONGO_DUMP_DIR, as_root=True)\n operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER,\n \"nogroup\", as_root=True)\n\n # high timeout here since mongodump can take a long time\n utils.execute_with_timeout(\n 'mongodump', '--out', MONGO_DUMP_DIR,\n *(self.app.admin_cmd_auth_params()),\n run_as_root=True, root_helper='sudo',\n timeout=LARGE_TIMEOUT\n )\n except exception.ProcessExecutionError as e:\n LOG.debug(\"Caught exception when creating the dump\")\n self.cleanup()\n raise e\n\n @property\n def cmd(self):\n \"\"\"Tars and streams the dump dir contents to\n the stdout\n \"\"\"\n cmd = 'sudo tar cPf - ' + MONGO_DUMP_DIR\n return cmd + self.zip_cmd + self.encrypt_cmd\n\n def cleanup(self):\n operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True)\n\n def _run_post_backup(self):\n self.cleanup()\n\n def 
estimate_dump_size(self):\n \"\"\"\n Estimate the space that the mongodump will take based on the output of\n db.stats().dataSize. This seems to be conservative, as the actual bson\n output in many cases is a fair bit smaller.\n \"\"\"\n dbs = self.app.list_all_dbs()\n # mongodump does not dump the content of the local database\n dbs.remove('local')\n dbstats = dict([(d, 0) for d in dbs])\n for d in dbstats:\n dbstats[d] = self.app.db_data_size(d)\n\n LOG.debug(\"Estimated size for databases: \" + str(dbstats))\n return sum(dbstats.values())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2721,"cells":{"repo_name":{"kind":"string","value":"eciis/web"},"path":{"kind":"string","value":"backend/handlers/resend_invite_handler.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1157"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"Resend Invite Handler.\"\"\"\nimport json\n\nfrom util import login_required\nfrom utils import json_response\nfrom utils import Utils\nfrom custom_exceptions import NotAuthorizedException\nfrom . import BaseHandler\nfrom google.appengine.ext import ndb\n\n__all__ = ['ResendInviteHandler']\n\nclass ResendInviteHandler(BaseHandler):\n \"\"\"Resend Invite Handler.\"\"\"\n\n @json_response\n @login_required\n def post(self, user, invite_key):\n \"\"\"Handle POST Requests.\"\"\"\n body = json.loads(self.request.body)\n host = self.request.host\n invite = ndb.Key(urlsafe=invite_key).get()\n \n Utils._assert(invite.status != 'sent',\n \"The invite has already been used\", NotAuthorizedException)\n\n user.check_permission(\"invite_members\",\n \"User is not allowed to send invites\",\n invite.institution_key.urlsafe())\n\n institution = invite.institution_key.get()\n Utils._assert(not institution.is_active(),\n \"This institution is not active\", NotAuthorizedException)\n\n invite.send_invite(host, user.current_institution)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2722,"cells":{"repo_name":{"kind":"string","value":"marcusmartins/compose"},"path":{"kind":"string","value":"compose/cli/verbose_proxy.py"},"copies":{"kind":"string","value":"67"},"size":{"kind":"string","value":"1691"},"content":{"kind":"string","value":"\nimport functools\nfrom itertools import chain\nimport logging\nimport pprint\n\nimport six\n\n\ndef format_call(args, kwargs):\n args = (repr(a) for a in args)\n kwargs = (\"{0!s}={1!r}\".format(*item) for item in six.iteritems(kwargs))\n return \"({0})\".format(\", \".join(chain(args, kwargs)))\n\n\ndef format_return(result, max_lines):\n if isinstance(result, (list, tuple, set)):\n return \"({0} with {1} items)\".format(type(result).__name__, len(result))\n\n if result:\n lines = pprint.pformat(result).split('\\n')\n extra = '\\n...' 
if len(lines) > max_lines else ''\n return '\\n'.join(lines[:max_lines]) + extra\n\n return result\n\n\nclass VerboseProxy(object):\n \"\"\"Proxy all function calls to another class and log method name, arguments\n and return values for each call.\n \"\"\"\n\n def __init__(self, obj_name, obj, log_name=None, max_lines=10):\n self.obj_name = obj_name\n self.obj = obj\n self.max_lines = max_lines\n self.log = logging.getLogger(log_name or __name__)\n\n def __getattr__(self, name):\n attr = getattr(self.obj, name)\n\n if not six.callable(attr):\n return attr\n\n return functools.partial(self.proxy_callable, name)\n\n def proxy_callable(self, call_name, *args, **kwargs):\n self.log.info(\"%s %s <- %s\",\n self.obj_name,\n call_name,\n format_call(args, kwargs))\n\n result = getattr(self.obj, call_name)(*args, **kwargs)\n self.log.info(\"%s %s -> %s\",\n self.obj_name,\n call_name,\n format_return(result, self.max_lines))\n return result\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2723,"cells":{"repo_name":{"kind":"string","value":"ritchyteam/odoo"},"path":{"kind":"string","value":"addons/purchase/wizard/purchase_line_invoice.py"},"copies":{"kind":"string","value":"205"},"size":{"kind":"string","value":"5419"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import osv\nfrom openerp.tools.translate import _\n\n\nclass purchase_line_invoice(osv.osv_memory):\n\n \"\"\" To create invoice for purchase order line\"\"\"\n\n _name = 'purchase.order.line_invoice'\n _description = 'Purchase Order Line Make Invoice'\n\n def makeInvoices(self, cr, uid, ids, context=None):\n\n \"\"\"\n To get Purchase Order line and create Invoice\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param context: A standard dictionary\n @return : return view of Invoice\n \"\"\"\n\n if context is None:\n context={}\n\n record_ids = context.get('active_ids',[])\n if record_ids:\n res = False\n invoices = {}\n invoice_obj = self.pool.get('account.invoice')\n purchase_obj = self.pool.get('purchase.order')\n purchase_line_obj = self.pool.get('purchase.order.line')\n invoice_line_obj = self.pool.get('account.invoice.line')\n account_jrnl_obj = self.pool.get('account.journal')\n\n def multiple_order_invoice_notes(orders):\n notes = \"\"\n for order in orders:\n notes += \"%s \\n\" % order.notes\n return notes\n\n\n\n def make_invoice_by_partner(partner, orders, lines_ids):\n \"\"\"\n create a new invoice for one supplier\n @param partner : The object partner\n @param orders : The set of orders to add in the invoice\n @param lines : The list of line's id\n \"\"\"\n name = orders and orders[0].name or ''\n journal_id = account_jrnl_obj.search(cr, uid, [('type', '=', 'purchase')], context=None)\n journal_id = journal_id and journal_id[0] or False\n a = partner.property_account_payable.id\n inv = {\n 'name': name,\n 'origin': name,\n 'type': 'in_invoice',\n 'journal_id':journal_id,\n 'reference' : partner.ref,\n 'account_id': a,\n 'partner_id': partner.id,\n 'invoice_line': [(6,0,lines_ids)],\n 'currency_id' : orders[0].currency_id.id,\n 'comment': multiple_order_invoice_notes(orders),\n 'payment_term': orders[0].payment_term_id.id,\n 'fiscal_position': partner.property_account_position.id\n }\n inv_id = invoice_obj.create(cr, uid, inv)\n for order in orders:\n order.write({'invoice_ids': [(4, inv_id)]})\n return inv_id\n\n for line in purchase_line_obj.browse(cr, uid, record_ids, context=context):\n if (not line.invoiced) and (line.state not in ('draft', 'cancel')):\n if not line.partner_id.id in invoices:\n invoices[line.partner_id.id] = []\n acc_id = purchase_obj._choose_account_from_po_line(cr, uid, line, context=context)\n inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, line, context=context)\n inv_line_data.update({'origin': line.order_id.name})\n inv_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)\n purchase_line_obj.write(cr, uid, [line.id], {'invoiced': True, 'invoice_lines': [(4, inv_id)]})\n invoices[line.partner_id.id].append((line,inv_id))\n\n res = []\n for result in invoices.values():\n il = map(lambda x: x[1], result)\n orders = list(set(map(lambda x : x[0].order_id, result)))\n\n res.append(make_invoice_by_partner(orders[0].partner_id, orders, il))\n\n return {\n 'domain': \"[('id','in', [\"+','.join(map(str,res))+\"])]\",\n 'name': _('Supplier Invoices'),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'account.invoice',\n 'view_id': False,\n 'context': \"{'type':'in_invoice', 'journal_type': 'purchase'}\",\n 'type': 'ir.actions.act_window'\n }\n\n\n# 
vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":2724,"cells":{"repo_name":{"kind":"string","value":"runefriborg/pycsp"},"path":{"kind":"string","value":"test/unix/multiprocesstest.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7496"},"content":{"kind":"string","value":"\"\"\"\nCopyright (c) 2009 John Markus Bjoerndalen ,\n Brian Vinter , Rune M. Friborg \nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n \nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software. THE\nSOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport sys\nsys.path.insert(0, \"../..\")\nfrom pycsp.parallel import *\nimport check\nimport time\nimport random\n\n@choice\ndef action(assertCheck, id, channel_input=None):\n if assertCheck:\n assertCheck(id)\n\n@multiprocess\ndef reader(cin, id, sleeper, assertCheck=None):\n while True:\n if sleeper: sleeper()\n got = cin()\n if assertCheck:\n assertCheck(id)\n \n@multiprocess\ndef writer(cout, id, cnt, sleeper):\n for i in range(cnt):\n if sleeper: sleeper()\n cout((id, i))\n retire(cout)\n\n@multiprocess\ndef par_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):\n while True:\n if sleeper: sleeper()\n \n AltSelect(\n InputGuard(cin1, action(assertCheck, 0)),\n InputGuard(cin2, action(assertCheck, 1)),\n InputGuard(cin3, action(assertCheck, 2)),\n InputGuard(cin4, action(assertCheck, 3))\n )\n\n@multiprocess\ndef par_fair_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):\n while True:\n if sleeper: sleeper()\n \n FairSelect(\n InputGuard(cin1, action(assertCheck, 0)),\n InputGuard(cin2, action(assertCheck, 1)),\n InputGuard(cin3, action(assertCheck, 2)),\n InputGuard(cin4, action(assertCheck, 3))\n )\n\n@multiprocess\ndef par_pri_reader(cin1,cin2,cin3,cin4, cnt, sleeper, assertCheck=None):\n while True:\n if sleeper: sleeper()\n \n PriSelect(\n InputGuard(cin1, action(assertCheck, 0)),\n InputGuard(cin2, action(assertCheck, 1)),\n InputGuard(cin3, action(assertCheck, 2)),\n InputGuard(cin4, action(assertCheck, 3))\n )\n\n@multiprocess\ndef return_msg(cin, sleeper):\n if sleeper: sleeper()\n return cin()\n\n\n@io\ndef sleep_one():\n time.sleep(0.01)\n\n@io\ndef sleep_random():\n time.sleep(random.random()/100)\n\ndef Parallel_Test(sleeper):\n \n c1=Channel()\n \n L= Parallel(writer(c1.writer(), 0, 10, sleeper), 10 * return_msg(c1.reader(), sleeper))\n \n if L and len(L) == 11 and L[0] == None and not None in L[1:]:\n print((\"OK - MultiProcess_Parallel_Test\"+str(sleeper)))\n else:\n print((\"Error - MultiProcess_Parallel_Test\"+str(sleeper)))\n print((str(L)))\n\ndef 
Sequence_Test(sleeper):\n \n c1=Channel()\n \n Spawn(writer(c1.writer(), 0, 10, sleeper))\n L= Sequence(10 * return_msg(c1.reader(), sleeper))\n \n if L and len(L) == 10 and not None in L:\n print((\"OK - MultiProcess_Sequence_Test\"+str(sleeper)))\n else:\n print((\"Error - MultiProcess_Sequence_Test\"+str(sleeper)))\n print((str(L)))\n\ndef One2One_Test(read_sleeper, write_sleeper):\n x = Channel()\n Spawn(check.Assert(x.reader(), \"MultiProcess_One2One_Test\"+str(read_sleeper)+str(write_sleeper), count=10, vocabulary=[0]))\n\n c1=Channel()\n Parallel(reader(c1.reader(), 0 , read_sleeper, x.writer()), writer(c1.writer(),1,10, write_sleeper))\n\ndef Any2One_Alting_Test(read_sleeper, write_sleeper):\n x = Channel()\n Spawn(check.Assert(x.reader(), \"MultiProcess_Any2One_Alting_Test\"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))\n\n c1=Channel()\n c2=Channel()\n c3=Channel()\n c4=Channel()\n\n cnt = 10\n\n Parallel(par_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),\n writer(c1.writer(),0,cnt, write_sleeper),\n writer(c2.writer(),1,cnt, write_sleeper),\n writer(c3.writer(),2,cnt, write_sleeper),\n writer(c4.writer(),3,cnt, write_sleeper))\n\ndef Any2One_FairAlting_Test(read_sleeper, write_sleeper):\n x = Channel()\n Spawn(check.Assert(x.reader(), \"MultiProcess_Any2One_FairAlting_Test\"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))\n\n c1=Channel()\n c2=Channel()\n c3=Channel()\n c4=Channel()\n\n cnt = 10\n\n Parallel(par_fair_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),\n writer(c1.writer(),0,cnt, write_sleeper),\n writer(c2.writer(),1,cnt, write_sleeper),\n writer(c3.writer(),2,cnt, write_sleeper),\n writer(c4.writer(),3,cnt, write_sleeper))\n\ndef Any2One_PriAlting_Test(read_sleeper, write_sleeper):\n x = Channel()\n Spawn(check.Assert(x.reader(), \"MultiProcess_Any2One_PriAlting_Test\"+str(read_sleeper)+str(write_sleeper), count=40, minimum=10, vocabulary=[0,1,2,3], quit_on_count=True))\n\n c1=Channel()\n c2=Channel()\n c3=Channel()\n c4=Channel()\n\n cnt = 10\n\n Parallel(par_pri_reader(c1.reader(), c2.reader(), c3.reader(), c4.reader(),cnt, read_sleeper, x.writer()),\n writer(c1.writer(),0,cnt, write_sleeper),\n writer(c2.writer(),1,cnt, write_sleeper),\n writer(c3.writer(),2,cnt, write_sleeper),\n writer(c4.writer(),3,cnt, write_sleeper))\n\ndef Any2Any_Test(read_sleeper, write_sleeper):\n x = Channel()\n Spawn(check.Assert(x.reader(), \"MultiProcess_Any2Any_Test\"+str(read_sleeper)+str(write_sleeper), count=40, vocabulary=[0,1,2,3]))\n\n c1=Channel() \n cnt = 10\n\n Parallel(reader(c1.reader(),0, read_sleeper, x.writer()), writer(c1.writer(),0,cnt, write_sleeper),\n reader(c1.reader(),1, read_sleeper, x.writer()), writer(c1.writer(),1,cnt, write_sleeper),\n reader(c1.reader(),2, read_sleeper, x.writer()), writer(c1.writer(),2,cnt, write_sleeper),\n reader(c1.reader(),3, read_sleeper, x.writer()), writer(c1.writer(),3,cnt, write_sleeper))\n \n\ndef autotest():\n for read_sleep in [('Zero', None), ('One',sleep_one), ('Random',sleep_random)]:\n\n Sequence_Test(read_sleep[1])\n Parallel_Test(read_sleep[1])\n\n for write_sleep in [('Zero', None), ('One',sleep_one), ('Random',sleep_random)]:\n rname, rsleep = read_sleep\n wname, wsleep = write_sleep\n\n if not rsleep==wsleep==sleep_one:\n One2One_Test(rsleep, wsleep)\n Any2One_Alting_Test(rsleep, wsleep)\n Any2One_FairAlting_Test(rsleep, 
wsleep)\n Any2One_PriAlting_Test(rsleep, wsleep)\n Any2Any_Test(rsleep, wsleep)\n\nif __name__ == '__main__':\n autotest()\n shutdown()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2725,"cells":{"repo_name":{"kind":"string","value":"alvarolopez/nova"},"path":{"kind":"string","value":"nova/cmd/network.py"},"copies":{"kind":"string","value":"27"},"size":{"kind":"string","value":"2415"},"content":{"kind":"string","value":"# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Starter script for Nova Network.\"\"\"\n\nimport sys\nimport traceback\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom nova.conductor import rpcapi as conductor_rpcapi\nfrom nova import config\nimport nova.db.api\nfrom nova import exception\nfrom nova.i18n import _LE\nfrom nova import objects\nfrom nova.objects import base as objects_base\nfrom nova.openstack.common.report import guru_meditation_report as gmr\nfrom nova import service\nfrom nova import utils\nfrom nova import version\n\nCONF = cfg.CONF\nCONF.import_opt('network_topic', 'nova.network.rpcapi')\nCONF.import_opt('use_local', 'nova.conductor.api', group='conductor')\n\n\ndef block_db_access():\n class NoDB(object):\n def __getattr__(self, attr):\n return self\n\n def __call__(self, *args, **kwargs):\n stacktrace = \"\".join(traceback.format_stack())\n LOG = logging.getLogger('nova.network')\n LOG.error(_LE('No db access allowed in nova-network: %s'),\n stacktrace)\n raise exception.DBNotAllowed('nova-network')\n\n nova.db.api.IMPL = NoDB()\n\n\ndef main():\n config.parse_args(sys.argv)\n logging.setup(CONF, \"nova\")\n utils.monkey_patch()\n objects.register_all()\n\n gmr.TextGuruMeditation.setup_autorun(version)\n\n if not CONF.conductor.use_local:\n block_db_access()\n objects_base.NovaObject.indirection_api = \\\n conductor_rpcapi.ConductorAPI()\n\n server = service.Service.create(binary='nova-network',\n topic=CONF.network_topic,\n db_allowed=CONF.conductor.use_local)\n service.serve(server)\n service.wait()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2726,"cells":{"repo_name":{"kind":"string","value":"quinot/ansible-modules-core"},"path":{"kind":"string","value":"cloud/amazon/iam_cert.py"},"copies":{"kind":"string","value":"20"},"size":{"kind":"string","value":"11703"},"content":{"kind":"string","value":"#!/usr/bin/python\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\nDOCUMENTATION = '''\n---\nmodule: iam_cert\nshort_description: Manage server certificates for use on ELBs and CloudFront\ndescription:\n - Allows for the management of server certificates\nversion_added: \"2.0\"\noptions:\n name:\n description:\n - Name of certificate to add, update or remove.\n required: true\n aliases: []\n new_name:\n description:\n - When present, this will update the name of the cert with the value passed here.\n required: false\n aliases: []\n new_path:\n description:\n - When present, this will update the path of the cert with the value passed here.\n required: false\n aliases: []\n state:\n description:\n - Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.\n required: true\n default: null\n choices: [ \"present\", \"absent\" ]\n aliases: []\n path:\n description:\n - When creating or updating, specify the desired path of the certificate\n required: false\n default: \"/\"\n aliases: []\n cert_chain:\n description:\n - The path to the CA certificate chain in PEM encoded format.\n required: false\n default: null\n aliases: []\n cert:\n description:\n - The path to the certificate body in PEM encoded format.\n required: false\n aliases: []\n key:\n description:\n - The path to the private key of the certificate in PEM encoded format.\n dup_ok:\n description:\n - By default the module will not upload a certifcate that is already uploaded into AWS. If set to True, it will upload the certifcate as long as the name is unique.\n required: false\n default: False\n aliases: []\n aws_secret_key:\n description:\n - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.\n required: false\n default: null\n aliases: [ 'ec2_secret_key', 'secret_key' ]\n aws_access_key:\n description:\n - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.\n required: false\n default: null\n aliases: [ 'ec2_access_key', 'access_key' ]\n\n\nrequirements: [ \"boto\" ]\nauthor: Jonathan I. 
Davila\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Basic server certificate upload\ntasks:\n- name: Upload Certifcate\n iam_cert:\n name: very_ssl\n state: present\n cert: somecert.pem\n key: privcertkey\n cert_chain: myverytrustedchain\n\n'''\nimport json\nimport sys\ntry:\n import boto\n import boto.iam\n import boto.ec2\n HAS_BOTO = True\nexcept ImportError:\n HAS_BOTO = False\n\ndef boto_exception(err):\n '''generic error message handler'''\n if hasattr(err, 'error_message'):\n error = err.error_message\n elif hasattr(err, 'message'):\n error = err.message\n else:\n error = '%s: %s' % (Exception, err)\n\n return error\n\ndef cert_meta(iam, name):\n opath = iam.get_server_certificate(name).get_server_certificate_result.\\\n server_certificate.\\\n server_certificate_metadata.\\\n path\n ocert = iam.get_server_certificate(name).get_server_certificate_result.\\\n server_certificate.\\\n certificate_body\n ocert_id = iam.get_server_certificate(name).get_server_certificate_result.\\\n server_certificate.\\\n server_certificate_metadata.\\\n server_certificate_id\n upload_date = iam.get_server_certificate(name).get_server_certificate_result.\\\n server_certificate.\\\n server_certificate_metadata.\\\n upload_date\n exp = iam.get_server_certificate(name).get_server_certificate_result.\\\n server_certificate.\\\n server_certificate_metadata.\\\n expiration\n return opath, ocert, ocert_id, upload_date, exp\n\ndef dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):\n update=False\n if any(ct in orig_cert_names for ct in [name, new_name]):\n for i_name in [name, new_name]:\n if i_name is None:\n continue\n\n if cert is not None:\n try:\n c_index=orig_cert_names.index(i_name)\n except NameError:\n continue\n else:\n if orig_cert_bodies[c_index] == cert:\n update=True\n break\n elif orig_cert_bodies[c_index] != cert:\n module.fail_json(changed=False, msg='A cert with the name %s already exists and'\n ' has a different certificate body associated'\n ' with it. 
Certifcates cannot have the same name')\n else:\n update=True\n break\n elif cert in orig_cert_bodies and not dup_ok:\n for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):\n if crt_body == cert:\n module.fail_json(changed=False, msg='This certificate already'\n ' exists under the name %s' % crt_name)\n\n return update\n\n\ndef cert_action(module, iam, name, cpath, new_name, new_path, state,\n cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):\n if state == 'present':\n update = dup_check(module, iam, name, new_name, cert, orig_cert_names,\n orig_cert_bodies, dup_ok)\n if update:\n opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)\n changed=True\n if new_name and new_path:\n iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)\n module.exit_json(changed=changed, original_name=name, new_name=new_name,\n original_path=opath, new_path=new_path, cert_body=ocert,\n upload_date=upload_date, expiration_date=exp)\n elif new_name and not new_path:\n iam.update_server_cert(name, new_cert_name=new_name)\n module.exit_json(changed=changed, original_name=name, new_name=new_name,\n cert_path=opath, cert_body=ocert,\n upload_date=upload_date, expiration_date=exp)\n elif not new_name and new_path:\n iam.update_server_cert(name, new_path=new_path)\n module.exit_json(changed=changed, name=new_name,\n original_path=opath, new_path=new_path, cert_body=ocert,\n upload_date=upload_date, expiration_date=exp)\n else:\n changed=False\n module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,\n upload_date=upload_date, expiration_date=exp,\n msg='No new path or name specified. No changes made')\n else:\n changed=True\n iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)\n opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)\n module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,\n upload_date=upload_date, expiration_date=exp)\n elif state == 'absent':\n if name in orig_cert_names:\n changed=True\n iam.delete_server_cert(name)\n module.exit_json(changed=changed, deleted_cert=name)\n else:\n changed=False\n module.exit_json(changed=changed, msg='Certifcate with the name %s already absent' % name)\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n state=dict(\n default=None, required=True, choices=['present', 'absent']),\n name=dict(default=None, required=False),\n cert=dict(default=None, required=False),\n key=dict(default=None, required=False),\n cert_chain=dict(default=None, required=False),\n new_name=dict(default=None, required=False),\n path=dict(default='/', required=False),\n new_path=dict(default=None, required=False),\n dup_ok=dict(default=False, required=False, choices=[False, True])\n )\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n mutually_exclusive=[],\n )\n\n if not HAS_BOTO:\n module.fail_json(msg=\"Boto is required for this module\")\n\n region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)\n\n try:\n if region:\n iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)\n else:\n iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)\n except boto.exception.NoAuthHandlerFound, e:\n module.fail_json(msg=str(e))\n\n state = module.params.get('state')\n name = module.params.get('name')\n path = module.params.get('path')\n new_name = module.params.get('new_name')\n new_path = module.params.get('new_path')\n cert_chain = module.params.get('cert_chain')\n dup_ok = module.params.get('dup_ok')\n if 
state == 'present':\n cert = open(module.params.get('cert'), 'r').read().rstrip()\n key = open(module.params.get('key'), 'r').read().rstrip()\n if cert_chain is not None:\n cert_chain = open(module.params.get('cert_chain'), 'r').read()\n else:\n key=cert=chain=None\n\n orig_certs = [ctb['server_certificate_name'] for ctb in \\\n iam.get_all_server_certs().\\\n list_server_certificates_result.\\\n server_certificate_metadata_list]\n orig_bodies = [iam.get_server_certificate(thing).\\\n get_server_certificate_result.\\\n certificate_body \\\n for thing in orig_certs]\n if new_name == name:\n new_name = None\n if new_path == path:\n new_path = None\n\n changed = False\n try:\n cert_action(module, iam, name, path, new_name, new_path, state,\n cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)\n except boto.exception.BotoServerError, err:\n module.fail_json(changed=changed, msg=str(err), debug=[cert,key])\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2727,"cells":{"repo_name":{"kind":"string","value":"gnowxilef/Wox"},"path":{"kind":"string","value":"PythonHome/Lib/site-packages/chardet/langbulgarianmodel.py"},"copies":{"kind":"string","value":"2965"},"size":{"kind":"string","value":"12784"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is Mozilla Communicator client code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 1998\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\n# 255: Control characters that usually does not exist in any text\n# 254: Carriage/Return\n# 253: symbol (punctuation) that does not belong to word\n# 252: 0 - 9\n\n# Character Mapping Table:\n# this table is modified base on win1251BulgarianCharToOrderMap, so\n# only number <64 is sure valid\n\nLatin5_BulgarianCharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40\n110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50\n253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60\n116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70\n194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80\n210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90\n 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0\n 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0\n 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0\n 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0\n 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0\n 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0\n)\n\nwin1251BulgarianCharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40\n110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50\n253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60\n116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70\n206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80\n221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90\n 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0\n 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0\n 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0\n 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0\n 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0\n 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0\n)\n\n# Model Table:\n# total sequences: 100%\n# first 512 sequences: 96.9392%\n# first 1024 sequences:3.0618%\n# rest sequences: 0.2992%\n# negative sequences: 0.0020%\nBulgarianLangModel = 
(\n0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,\n3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,\n0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,\n0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,\n0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,\n1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,\n0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,\n0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,\n3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,\n3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,\n2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,\n3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,\n3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,\n1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,\n3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,\n1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,\n2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,\n2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,\n3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,\n1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,\n2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,\n2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,\n3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,\n1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,\n2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,\n2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,\n2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,\n1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,\n2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,\n1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,\n3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,\n1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,\n3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,\n1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,\n2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,\n1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,\n2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,\n1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,\n2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,\n1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,\n3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,\n1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,\n1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,\n2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,\n1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,\n2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,\n1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,\n1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,\n0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,\n1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,\n1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,\n2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,\n1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,\n1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,\n0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0
,0,0,0,0,0,0,0,0,0,\n1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,\n0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,\n2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,\n0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,\n2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,\n1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,\n0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,\n0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,\n1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,\n1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,\n1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,\n2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n)\n\nLatin5BulgarianModel = {\n 'charToOrderMap': Latin5_BulgarianCharToOrderMap,\n 'precedenceMatrix': BulgarianLangModel,\n 'mTypicalPositiveRatio': 0.969392,\n 'keepEnglishLetter': False,\n 'charsetName': \"ISO-8859-5\"\n}\n\nWin1251BulgarianModel = {\n 'charToOrderMap': win1251BulgarianCharToOrderMap,\n 'precedenceMatrix': BulgarianLangModel,\n 'mTypicalPositiveRatio': 0.969392,\n 'keepEnglishLetter': False,\n 'charsetName': \"windows-1251\"\n}\n\n\n# flake8: noqa\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2728,"cells":{"repo_name":{"kind":"string","value":"huijunwu/heron"},"path":{"kind":"string","value":"heron/shell/src/python/handlers/pmaphandler.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"1352"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n''' pmaphandler.py '''\nimport json\nimport tornado.web\n\nfrom heron.shell.src.python import utils\n\nclass PmapHandler(tornado.web.RequestHandler):\n \"\"\"\n Responsible for reporting memory map of a process given its pid.\n \"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n @tornado.web.asynchronous\n def get(self, pid):\n ''' get method '''\n body = utils.str_cmd(['pmap', '-pXX', pid], None, None)\n self.content_type = 'application/json'\n self.write(json.dumps(body))\n self.finish()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2729,"cells":{"repo_name":{"kind":"string","value":"NixaSoftware/CVis"},"path":{"kind":"string","value":"venv/lib/python2.7/site-packages/pygments/lexers/_vim_builtins.py"},"copies":{"kind":"string","value":"31"},"size":{"kind":"string","value":"57090"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers._vim_builtins\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This file is autogenerated by scripts/get_vimkw.py\n\n :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\n# Split up in multiple functions so it's importable by jython, which has a\n# per-method size limit.\n\ndef _getauto():\n var = (\n ('BufAdd','BufAdd'),\n ('BufCreate','BufCreate'),\n ('BufDelete','BufDelete'),\n ('BufEnter','BufEnter'),\n ('BufFilePost','BufFilePost'),\n ('BufFilePre','BufFilePre'),\n ('BufHidden','BufHidden'),\n ('BufLeave','BufLeave'),\n ('BufNew','BufNew'),\n ('BufNewFile','BufNewFile'),\n ('BufRead','BufRead'),\n ('BufReadCmd','BufReadCmd'),\n ('BufReadPost','BufReadPost'),\n ('BufReadPre','BufReadPre'),\n ('BufUnload','BufUnload'),\n ('BufWinEnter','BufWinEnter'),\n ('BufWinLeave','BufWinLeave'),\n ('BufWipeout','BufWipeout'),\n ('BufWrite','BufWrite'),\n ('BufWriteCmd','BufWriteCmd'),\n ('BufWritePost','BufWritePost'),\n ('BufWritePre','BufWritePre'),\n ('Cmd','Cmd'),\n ('CmdwinEnter','CmdwinEnter'),\n ('CmdwinLeave','CmdwinLeave'),\n ('ColorScheme','ColorScheme'),\n ('CompleteDone','CompleteDone'),\n ('CursorHold','CursorHold'),\n ('CursorHoldI','CursorHoldI'),\n ('CursorMoved','CursorMoved'),\n ('CursorMovedI','CursorMovedI'),\n ('EncodingChanged','EncodingChanged'),\n ('FileAppendCmd','FileAppendCmd'),\n ('FileAppendPost','FileAppendPost'),\n ('FileAppendPre','FileAppendPre'),\n ('FileChangedRO','FileChangedRO'),\n ('FileChangedShell','FileChangedShell'),\n ('FileChangedShellPost','FileChangedShellPost'),\n ('FileEncoding','FileEncoding'),\n ('FileReadCmd','FileReadCmd'),\n ('FileReadPost','FileReadPost'),\n ('FileReadPre','FileReadPre'),\n ('FileType','FileType'),\n ('FileWriteCmd','FileWriteCmd'),\n ('FileWritePost','FileWritePost'),\n ('FileWritePre','FileWritePre'),\n ('FilterReadPost','FilterReadPost'),\n ('FilterReadPre','FilterReadPre'),\n ('FilterWritePost','FilterWritePost'),\n ('FilterWritePre','FilterWritePre'),\n ('FocusGained','FocusGained'),\n ('FocusLost','FocusLost'),\n ('FuncUndefined','FuncUndefined'),\n ('GUIEnter','GUIEnter'),\n ('GUIFailed','GUIFailed'),\n ('InsertChange','InsertChange'),\n ('InsertCharPre','InsertCharPre'),\n ('InsertEnter','InsertEnter'),\n ('InsertLeave','InsertLeave'),\n ('MenuPopup','MenuPopup'),\n ('QuickFixCmdPost','QuickFixCmdPost'),\n ('QuickFixCmdPre','QuickFixCmdPre'),\n ('QuitPre','QuitPre'),\n ('RemoteReply','RemoteReply'),\n ('SessionLoadPost','SessionLoadPost'),\n 
('ShellCmdPost','ShellCmdPost'),\n ('ShellFilterPost','ShellFilterPost'),\n ('SourceCmd','SourceCmd'),\n ('SourcePre','SourcePre'),\n ('SpellFileMissing','SpellFileMissing'),\n ('StdinReadPost','StdinReadPost'),\n ('StdinReadPre','StdinReadPre'),\n ('SwapExists','SwapExists'),\n ('Syntax','Syntax'),\n ('TabEnter','TabEnter'),\n ('TabLeave','TabLeave'),\n ('TermChanged','TermChanged'),\n ('TermResponse','TermResponse'),\n ('TextChanged','TextChanged'),\n ('TextChangedI','TextChangedI'),\n ('User','User'),\n ('UserGettingBored','UserGettingBored'),\n ('VimEnter','VimEnter'),\n ('VimLeave','VimLeave'),\n ('VimLeavePre','VimLeavePre'),\n ('VimResized','VimResized'),\n ('WinEnter','WinEnter'),\n ('WinLeave','WinLeave'),\n ('event','event'),\n )\n return var\nauto = _getauto()\n\ndef _getcommand():\n var = (\n ('a','a'),\n ('ab','ab'),\n ('abc','abclear'),\n ('abo','aboveleft'),\n ('al','all'),\n ('ar','ar'),\n ('ar','args'),\n ('arga','argadd'),\n ('argd','argdelete'),\n ('argdo','argdo'),\n ('arge','argedit'),\n ('argg','argglobal'),\n ('argl','arglocal'),\n ('argu','argument'),\n ('as','ascii'),\n ('au','au'),\n ('b','buffer'),\n ('bN','bNext'),\n ('ba','ball'),\n ('bad','badd'),\n ('bd','bdelete'),\n ('bel','belowright'),\n ('bf','bfirst'),\n ('bl','blast'),\n ('bm','bmodified'),\n ('bn','bnext'),\n ('bo','botright'),\n ('bp','bprevious'),\n ('br','br'),\n ('br','brewind'),\n ('brea','break'),\n ('breaka','breakadd'),\n ('breakd','breakdel'),\n ('breakl','breaklist'),\n ('bro','browse'),\n ('bu','bu'),\n ('buf','buf'),\n ('bufdo','bufdo'),\n ('buffers','buffers'),\n ('bun','bunload'),\n ('bw','bwipeout'),\n ('c','c'),\n ('c','change'),\n ('cN','cN'),\n ('cN','cNext'),\n ('cNf','cNf'),\n ('cNf','cNfile'),\n ('cabc','cabclear'),\n ('cad','cad'),\n ('cad','caddexpr'),\n ('caddb','caddbuffer'),\n ('caddf','caddfile'),\n ('cal','call'),\n ('cat','catch'),\n ('cb','cbuffer'),\n ('cc','cc'),\n ('ccl','cclose'),\n ('cd','cd'),\n ('ce','center'),\n ('cex','cexpr'),\n ('cf','cfile'),\n ('cfir','cfirst'),\n ('cg','cgetfile'),\n ('cgetb','cgetbuffer'),\n ('cgete','cgetexpr'),\n ('changes','changes'),\n ('chd','chdir'),\n ('che','checkpath'),\n ('checkt','checktime'),\n ('cl','cl'),\n ('cl','clist'),\n ('cla','clast'),\n ('clo','close'),\n ('cmapc','cmapclear'),\n ('cn','cn'),\n ('cn','cnext'),\n ('cnew','cnewer'),\n ('cnf','cnf'),\n ('cnf','cnfile'),\n ('co','copy'),\n ('col','colder'),\n ('colo','colorscheme'),\n ('com','com'),\n ('comc','comclear'),\n ('comp','compiler'),\n ('con','con'),\n ('con','continue'),\n ('conf','confirm'),\n ('cope','copen'),\n ('cp','cprevious'),\n ('cpf','cpfile'),\n ('cq','cquit'),\n ('cr','crewind'),\n ('cs','cs'),\n ('cscope','cscope'),\n ('cstag','cstag'),\n ('cuna','cunabbrev'),\n ('cw','cwindow'),\n ('d','d'),\n ('d','delete'),\n ('de','de'),\n ('debug','debug'),\n ('debugg','debuggreedy'),\n ('del','del'),\n ('delc','delcommand'),\n ('delel','delel'),\n ('delep','delep'),\n ('deletel','deletel'),\n ('deletep','deletep'),\n ('deletl','deletl'),\n ('deletp','deletp'),\n ('delf','delf'),\n ('delf','delfunction'),\n ('dell','dell'),\n ('delm','delmarks'),\n ('delp','delp'),\n ('dep','dep'),\n ('di','di'),\n ('di','display'),\n ('diffg','diffget'),\n ('diffo','diffoff'),\n ('diffp','diffpatch'),\n ('diffpu','diffput'),\n ('diffs','diffsplit'),\n ('difft','diffthis'),\n ('diffu','diffupdate'),\n ('dig','dig'),\n ('dig','digraphs'),\n ('dir','dir'),\n ('dj','djump'),\n ('dl','dl'),\n ('dli','dlist'),\n ('do','do'),\n ('doau','doau'),\n ('dp','dp'),\n ('dr','drop'),\n 
('ds','dsearch'),\n ('dsp','dsplit'),\n ('e','e'),\n ('e','edit'),\n ('ea','ea'),\n ('earlier','earlier'),\n ('ec','ec'),\n ('echoe','echoerr'),\n ('echom','echomsg'),\n ('echon','echon'),\n ('el','else'),\n ('elsei','elseif'),\n ('em','emenu'),\n ('en','en'),\n ('en','endif'),\n ('endf','endf'),\n ('endf','endfunction'),\n ('endfo','endfor'),\n ('endfun','endfun'),\n ('endt','endtry'),\n ('endw','endwhile'),\n ('ene','enew'),\n ('ex','ex'),\n ('exi','exit'),\n ('exu','exusage'),\n ('f','f'),\n ('f','file'),\n ('files','files'),\n ('filet','filet'),\n ('filetype','filetype'),\n ('fin','fin'),\n ('fin','find'),\n ('fina','finally'),\n ('fini','finish'),\n ('fir','first'),\n ('fix','fixdel'),\n ('fo','fold'),\n ('foldc','foldclose'),\n ('foldd','folddoopen'),\n ('folddoc','folddoclosed'),\n ('foldo','foldopen'),\n ('for','for'),\n ('fu','fu'),\n ('fu','function'),\n ('fun','fun'),\n ('g','g'),\n ('go','goto'),\n ('gr','grep'),\n ('grepa','grepadd'),\n ('gui','gui'),\n ('gvim','gvim'),\n ('h','h'),\n ('h','help'),\n ('ha','hardcopy'),\n ('helpf','helpfind'),\n ('helpg','helpgrep'),\n ('helpt','helptags'),\n ('hi','hi'),\n ('hid','hide'),\n ('his','history'),\n ('i','i'),\n ('ia','ia'),\n ('iabc','iabclear'),\n ('if','if'),\n ('ij','ijump'),\n ('il','ilist'),\n ('imapc','imapclear'),\n ('in','in'),\n ('intro','intro'),\n ('is','isearch'),\n ('isp','isplit'),\n ('iuna','iunabbrev'),\n ('j','join'),\n ('ju','jumps'),\n ('k','k'),\n ('kee','keepmarks'),\n ('keepa','keepa'),\n ('keepalt','keepalt'),\n ('keepj','keepjumps'),\n ('keepp','keeppatterns'),\n ('l','l'),\n ('l','list'),\n ('lN','lN'),\n ('lN','lNext'),\n ('lNf','lNf'),\n ('lNf','lNfile'),\n ('la','la'),\n ('la','last'),\n ('lad','lad'),\n ('lad','laddexpr'),\n ('laddb','laddbuffer'),\n ('laddf','laddfile'),\n ('lan','lan'),\n ('lan','language'),\n ('lat','lat'),\n ('later','later'),\n ('lb','lbuffer'),\n ('lc','lcd'),\n ('lch','lchdir'),\n ('lcl','lclose'),\n ('lcs','lcs'),\n ('lcscope','lcscope'),\n ('le','left'),\n ('lefta','leftabove'),\n ('lex','lexpr'),\n ('lf','lfile'),\n ('lfir','lfirst'),\n ('lg','lgetfile'),\n ('lgetb','lgetbuffer'),\n ('lgete','lgetexpr'),\n ('lgr','lgrep'),\n ('lgrepa','lgrepadd'),\n ('lh','lhelpgrep'),\n ('ll','ll'),\n ('lla','llast'),\n ('lli','llist'),\n ('lmak','lmake'),\n ('lmapc','lmapclear'),\n ('lne','lne'),\n ('lne','lnext'),\n ('lnew','lnewer'),\n ('lnf','lnf'),\n ('lnf','lnfile'),\n ('lo','lo'),\n ('lo','loadview'),\n ('loadk','loadk'),\n ('loadkeymap','loadkeymap'),\n ('loc','lockmarks'),\n ('lockv','lockvar'),\n ('lol','lolder'),\n ('lop','lopen'),\n ('lp','lprevious'),\n ('lpf','lpfile'),\n ('lr','lrewind'),\n ('ls','ls'),\n ('lt','ltag'),\n ('lua','lua'),\n ('luado','luado'),\n ('luafile','luafile'),\n ('lv','lvimgrep'),\n ('lvimgrepa','lvimgrepadd'),\n ('lw','lwindow'),\n ('m','move'),\n ('ma','ma'),\n ('ma','mark'),\n ('mak','make'),\n ('marks','marks'),\n ('mat','match'),\n ('menut','menut'),\n ('menut','menutranslate'),\n ('mes','mes'),\n ('messages','messages'),\n ('mk','mk'),\n ('mk','mkexrc'),\n ('mks','mksession'),\n ('mksp','mkspell'),\n ('mkv','mkv'),\n ('mkv','mkvimrc'),\n ('mkvie','mkview'),\n ('mo','mo'),\n ('mod','mode'),\n ('mz','mz'),\n ('mz','mzscheme'),\n ('mzf','mzfile'),\n ('n','n'),\n ('n','next'),\n ('nb','nbkey'),\n ('nbc','nbclose'),\n ('nbs','nbstart'),\n ('ne','ne'),\n ('new','new'),\n ('nmapc','nmapclear'),\n ('noa','noa'),\n ('noautocmd','noautocmd'),\n ('noh','nohlsearch'),\n ('nu','number'),\n ('o','o'),\n ('o','open'),\n ('ol','oldfiles'),\n 
('omapc','omapclear'),\n ('on','only'),\n ('opt','options'),\n ('ownsyntax','ownsyntax'),\n ('p','p'),\n ('p','print'),\n ('pc','pclose'),\n ('pe','pe'),\n ('pe','perl'),\n ('ped','pedit'),\n ('perld','perldo'),\n ('po','pop'),\n ('popu','popu'),\n ('popu','popup'),\n ('pp','ppop'),\n ('pr','pr'),\n ('pre','preserve'),\n ('prev','previous'),\n ('pro','pro'),\n ('prof','profile'),\n ('profd','profdel'),\n ('promptf','promptfind'),\n ('promptr','promptrepl'),\n ('ps','psearch'),\n ('ptN','ptN'),\n ('ptN','ptNext'),\n ('pta','ptag'),\n ('ptf','ptfirst'),\n ('ptj','ptjump'),\n ('ptl','ptlast'),\n ('ptn','ptn'),\n ('ptn','ptnext'),\n ('ptp','ptprevious'),\n ('ptr','ptrewind'),\n ('pts','ptselect'),\n ('pu','put'),\n ('pw','pwd'),\n ('py','py'),\n ('py','python'),\n ('py3','py3'),\n ('py3','py3'),\n ('py3do','py3do'),\n ('pydo','pydo'),\n ('pyf','pyfile'),\n ('python3','python3'),\n ('q','q'),\n ('q','quit'),\n ('qa','qall'),\n ('quita','quitall'),\n ('r','r'),\n ('r','read'),\n ('re','re'),\n ('rec','recover'),\n ('red','red'),\n ('red','redo'),\n ('redi','redir'),\n ('redr','redraw'),\n ('redraws','redrawstatus'),\n ('reg','registers'),\n ('res','resize'),\n ('ret','retab'),\n ('retu','return'),\n ('rew','rewind'),\n ('ri','right'),\n ('rightb','rightbelow'),\n ('ru','ru'),\n ('ru','runtime'),\n ('rub','ruby'),\n ('rubyd','rubydo'),\n ('rubyf','rubyfile'),\n ('rundo','rundo'),\n ('rv','rviminfo'),\n ('sN','sNext'),\n ('sa','sargument'),\n ('sal','sall'),\n ('san','sandbox'),\n ('sav','saveas'),\n ('sb','sbuffer'),\n ('sbN','sbNext'),\n ('sba','sball'),\n ('sbf','sbfirst'),\n ('sbl','sblast'),\n ('sbm','sbmodified'),\n ('sbn','sbnext'),\n ('sbp','sbprevious'),\n ('sbr','sbrewind'),\n ('scrip','scrip'),\n ('scrip','scriptnames'),\n ('scripte','scriptencoding'),\n ('scs','scs'),\n ('scscope','scscope'),\n ('se','set'),\n ('setf','setfiletype'),\n ('setg','setglobal'),\n ('setl','setlocal'),\n ('sf','sfind'),\n ('sfir','sfirst'),\n ('sh','shell'),\n ('si','si'),\n ('sig','sig'),\n ('sign','sign'),\n ('sil','silent'),\n ('sim','simalt'),\n ('sl','sl'),\n ('sl','sleep'),\n ('sla','slast'),\n ('sm','smagic'),\n ('sm','smap'),\n ('sme','sme'),\n ('smenu','smenu'),\n ('sn','snext'),\n ('sni','sniff'),\n ('sno','snomagic'),\n ('snoreme','snoreme'),\n ('snoremenu','snoremenu'),\n ('so','so'),\n ('so','source'),\n ('sor','sort'),\n ('sp','split'),\n ('spe','spe'),\n ('spe','spellgood'),\n ('spelld','spelldump'),\n ('spelli','spellinfo'),\n ('spellr','spellrepall'),\n ('spellu','spellundo'),\n ('spellw','spellwrong'),\n ('spr','sprevious'),\n ('sre','srewind'),\n ('st','st'),\n ('st','stop'),\n ('sta','stag'),\n ('star','star'),\n ('star','startinsert'),\n ('start','start'),\n ('startg','startgreplace'),\n ('startr','startreplace'),\n ('stj','stjump'),\n ('stopi','stopinsert'),\n ('sts','stselect'),\n ('sun','sunhide'),\n ('sunme','sunme'),\n ('sunmenu','sunmenu'),\n ('sus','suspend'),\n ('sv','sview'),\n ('sw','swapname'),\n ('sy','sy'),\n ('syn','syn'),\n ('sync','sync'),\n ('syncbind','syncbind'),\n ('syntime','syntime'),\n ('t','t'),\n ('tN','tN'),\n ('tN','tNext'),\n ('ta','ta'),\n ('ta','tag'),\n ('tab','tab'),\n ('tabN','tabN'),\n ('tabN','tabNext'),\n ('tabc','tabclose'),\n ('tabd','tabdo'),\n ('tabe','tabedit'),\n ('tabf','tabfind'),\n ('tabfir','tabfirst'),\n ('tabl','tablast'),\n ('tabm','tabmove'),\n ('tabn','tabnext'),\n ('tabnew','tabnew'),\n ('tabo','tabonly'),\n ('tabp','tabprevious'),\n ('tabr','tabrewind'),\n ('tabs','tabs'),\n ('tags','tags'),\n ('tc','tcl'),\n ('tcld','tcldo'),\n 
('tclf','tclfile'),\n ('te','tearoff'),\n ('tf','tfirst'),\n ('th','throw'),\n ('tj','tjump'),\n ('tl','tlast'),\n ('tm','tm'),\n ('tm','tmenu'),\n ('tn','tn'),\n ('tn','tnext'),\n ('to','topleft'),\n ('tp','tprevious'),\n ('tr','tr'),\n ('tr','trewind'),\n ('try','try'),\n ('ts','tselect'),\n ('tu','tu'),\n ('tu','tunmenu'),\n ('u','u'),\n ('u','undo'),\n ('un','un'),\n ('una','unabbreviate'),\n ('undoj','undojoin'),\n ('undol','undolist'),\n ('unh','unhide'),\n ('unl','unl'),\n ('unlo','unlockvar'),\n ('uns','unsilent'),\n ('up','update'),\n ('v','v'),\n ('ve','ve'),\n ('ve','version'),\n ('verb','verbose'),\n ('vert','vertical'),\n ('vi','vi'),\n ('vi','visual'),\n ('vie','view'),\n ('vim','vimgrep'),\n ('vimgrepa','vimgrepadd'),\n ('viu','viusage'),\n ('vmapc','vmapclear'),\n ('vne','vnew'),\n ('vs','vsplit'),\n ('w','w'),\n ('w','write'),\n ('wN','wNext'),\n ('wa','wall'),\n ('wh','while'),\n ('win','win'),\n ('win','winsize'),\n ('winc','wincmd'),\n ('windo','windo'),\n ('winp','winpos'),\n ('wn','wnext'),\n ('wp','wprevious'),\n ('wq','wq'),\n ('wqa','wqall'),\n ('ws','wsverb'),\n ('wundo','wundo'),\n ('wv','wviminfo'),\n ('x','x'),\n ('x','xit'),\n ('xa','xall'),\n ('xmapc','xmapclear'),\n ('xme','xme'),\n ('xmenu','xmenu'),\n ('xnoreme','xnoreme'),\n ('xnoremenu','xnoremenu'),\n ('xunme','xunme'),\n ('xunmenu','xunmenu'),\n ('xwininfo','xwininfo'),\n ('y','yank'),\n )\n return var\ncommand = _getcommand()\n\ndef _getoption():\n var = (\n ('acd','acd'),\n ('ai','ai'),\n ('akm','akm'),\n ('al','al'),\n ('aleph','aleph'),\n ('allowrevins','allowrevins'),\n ('altkeymap','altkeymap'),\n ('ambiwidth','ambiwidth'),\n ('ambw','ambw'),\n ('anti','anti'),\n ('antialias','antialias'),\n ('ar','ar'),\n ('arab','arab'),\n ('arabic','arabic'),\n ('arabicshape','arabicshape'),\n ('ari','ari'),\n ('arshape','arshape'),\n ('autochdir','autochdir'),\n ('autoindent','autoindent'),\n ('autoread','autoread'),\n ('autowrite','autowrite'),\n ('autowriteall','autowriteall'),\n ('aw','aw'),\n ('awa','awa'),\n ('background','background'),\n ('backspace','backspace'),\n ('backup','backup'),\n ('backupcopy','backupcopy'),\n ('backupdir','backupdir'),\n ('backupext','backupext'),\n ('backupskip','backupskip'),\n ('balloondelay','balloondelay'),\n ('ballooneval','ballooneval'),\n ('balloonexpr','balloonexpr'),\n ('bdir','bdir'),\n ('bdlay','bdlay'),\n ('beval','beval'),\n ('bex','bex'),\n ('bexpr','bexpr'),\n ('bg','bg'),\n ('bh','bh'),\n ('bin','bin'),\n ('binary','binary'),\n ('biosk','biosk'),\n ('bioskey','bioskey'),\n ('bk','bk'),\n ('bkc','bkc'),\n ('bl','bl'),\n ('bomb','bomb'),\n ('breakat','breakat'),\n ('brk','brk'),\n ('browsedir','browsedir'),\n ('bs','bs'),\n ('bsdir','bsdir'),\n ('bsk','bsk'),\n ('bt','bt'),\n ('bufhidden','bufhidden'),\n ('buflisted','buflisted'),\n ('buftype','buftype'),\n ('casemap','casemap'),\n ('cb','cb'),\n ('cc','cc'),\n ('ccv','ccv'),\n ('cd','cd'),\n ('cdpath','cdpath'),\n ('cedit','cedit'),\n ('cf','cf'),\n ('cfu','cfu'),\n ('ch','ch'),\n ('charconvert','charconvert'),\n ('ci','ci'),\n ('cin','cin'),\n ('cindent','cindent'),\n ('cink','cink'),\n ('cinkeys','cinkeys'),\n ('cino','cino'),\n ('cinoptions','cinoptions'),\n ('cinw','cinw'),\n ('cinwords','cinwords'),\n ('clipboard','clipboard'),\n ('cmdheight','cmdheight'),\n ('cmdwinheight','cmdwinheight'),\n ('cmp','cmp'),\n ('cms','cms'),\n ('co','co'),\n ('cocu','cocu'),\n ('cole','cole'),\n ('colorcolumn','colorcolumn'),\n ('columns','columns'),\n ('com','com'),\n ('comments','comments'),\n 
('commentstring','commentstring'),\n ('compatible','compatible'),\n ('complete','complete'),\n ('completefunc','completefunc'),\n ('completeopt','completeopt'),\n ('concealcursor','concealcursor'),\n ('conceallevel','conceallevel'),\n ('confirm','confirm'),\n ('consk','consk'),\n ('conskey','conskey'),\n ('copyindent','copyindent'),\n ('cot','cot'),\n ('cp','cp'),\n ('cpo','cpo'),\n ('cpoptions','cpoptions'),\n ('cpt','cpt'),\n ('crb','crb'),\n ('cryptmethod','cryptmethod'),\n ('cscopepathcomp','cscopepathcomp'),\n ('cscopeprg','cscopeprg'),\n ('cscopequickfix','cscopequickfix'),\n ('cscoperelative','cscoperelative'),\n ('cscopetag','cscopetag'),\n ('cscopetagorder','cscopetagorder'),\n ('cscopeverbose','cscopeverbose'),\n ('cspc','cspc'),\n ('csprg','csprg'),\n ('csqf','csqf'),\n ('csre','csre'),\n ('cst','cst'),\n ('csto','csto'),\n ('csverb','csverb'),\n ('cuc','cuc'),\n ('cul','cul'),\n ('cursorbind','cursorbind'),\n ('cursorcolumn','cursorcolumn'),\n ('cursorline','cursorline'),\n ('cwh','cwh'),\n ('debug','debug'),\n ('deco','deco'),\n ('def','def'),\n ('define','define'),\n ('delcombine','delcombine'),\n ('dex','dex'),\n ('dg','dg'),\n ('dict','dict'),\n ('dictionary','dictionary'),\n ('diff','diff'),\n ('diffexpr','diffexpr'),\n ('diffopt','diffopt'),\n ('digraph','digraph'),\n ('dip','dip'),\n ('dir','dir'),\n ('directory','directory'),\n ('display','display'),\n ('dy','dy'),\n ('ea','ea'),\n ('ead','ead'),\n ('eadirection','eadirection'),\n ('eb','eb'),\n ('ed','ed'),\n ('edcompatible','edcompatible'),\n ('ef','ef'),\n ('efm','efm'),\n ('ei','ei'),\n ('ek','ek'),\n ('enc','enc'),\n ('encoding','encoding'),\n ('endofline','endofline'),\n ('eol','eol'),\n ('ep','ep'),\n ('equalalways','equalalways'),\n ('equalprg','equalprg'),\n ('errorbells','errorbells'),\n ('errorfile','errorfile'),\n ('errorformat','errorformat'),\n ('esckeys','esckeys'),\n ('et','et'),\n ('eventignore','eventignore'),\n ('ex','ex'),\n ('expandtab','expandtab'),\n ('exrc','exrc'),\n ('fcl','fcl'),\n ('fcs','fcs'),\n ('fdc','fdc'),\n ('fde','fde'),\n ('fdi','fdi'),\n ('fdl','fdl'),\n ('fdls','fdls'),\n ('fdm','fdm'),\n ('fdn','fdn'),\n ('fdo','fdo'),\n ('fdt','fdt'),\n ('fen','fen'),\n ('fenc','fenc'),\n ('fencs','fencs'),\n ('fex','fex'),\n ('ff','ff'),\n ('ffs','ffs'),\n ('fic','fic'),\n ('fileencoding','fileencoding'),\n ('fileencodings','fileencodings'),\n ('fileformat','fileformat'),\n ('fileformats','fileformats'),\n ('fileignorecase','fileignorecase'),\n ('filetype','filetype'),\n ('fillchars','fillchars'),\n ('fk','fk'),\n ('fkmap','fkmap'),\n ('flp','flp'),\n ('fml','fml'),\n ('fmr','fmr'),\n ('fo','fo'),\n ('foldclose','foldclose'),\n ('foldcolumn','foldcolumn'),\n ('foldenable','foldenable'),\n ('foldexpr','foldexpr'),\n ('foldignore','foldignore'),\n ('foldlevel','foldlevel'),\n ('foldlevelstart','foldlevelstart'),\n ('foldmarker','foldmarker'),\n ('foldmethod','foldmethod'),\n ('foldminlines','foldminlines'),\n ('foldnestmax','foldnestmax'),\n ('foldopen','foldopen'),\n ('foldtext','foldtext'),\n ('formatexpr','formatexpr'),\n ('formatlistpat','formatlistpat'),\n ('formatoptions','formatoptions'),\n ('formatprg','formatprg'),\n ('fp','fp'),\n ('fs','fs'),\n ('fsync','fsync'),\n ('ft','ft'),\n ('gcr','gcr'),\n ('gd','gd'),\n ('gdefault','gdefault'),\n ('gfm','gfm'),\n ('gfn','gfn'),\n ('gfs','gfs'),\n ('gfw','gfw'),\n ('ghr','ghr'),\n ('go','go'),\n ('gp','gp'),\n ('grepformat','grepformat'),\n ('grepprg','grepprg'),\n ('gtl','gtl'),\n ('gtt','gtt'),\n ('guicursor','guicursor'),\n 
('guifont','guifont'),\n ('guifontset','guifontset'),\n ('guifontwide','guifontwide'),\n ('guiheadroom','guiheadroom'),\n ('guioptions','guioptions'),\n ('guipty','guipty'),\n ('guitablabel','guitablabel'),\n ('guitabtooltip','guitabtooltip'),\n ('helpfile','helpfile'),\n ('helpheight','helpheight'),\n ('helplang','helplang'),\n ('hf','hf'),\n ('hh','hh'),\n ('hi','hi'),\n ('hid','hid'),\n ('hidden','hidden'),\n ('highlight','highlight'),\n ('history','history'),\n ('hk','hk'),\n ('hkmap','hkmap'),\n ('hkmapp','hkmapp'),\n ('hkp','hkp'),\n ('hl','hl'),\n ('hlg','hlg'),\n ('hls','hls'),\n ('hlsearch','hlsearch'),\n ('ic','ic'),\n ('icon','icon'),\n ('iconstring','iconstring'),\n ('ignorecase','ignorecase'),\n ('im','im'),\n ('imactivatefunc','imactivatefunc'),\n ('imactivatekey','imactivatekey'),\n ('imaf','imaf'),\n ('imak','imak'),\n ('imc','imc'),\n ('imcmdline','imcmdline'),\n ('imd','imd'),\n ('imdisable','imdisable'),\n ('imi','imi'),\n ('iminsert','iminsert'),\n ('ims','ims'),\n ('imsearch','imsearch'),\n ('imsf','imsf'),\n ('imstatusfunc','imstatusfunc'),\n ('inc','inc'),\n ('include','include'),\n ('includeexpr','includeexpr'),\n ('incsearch','incsearch'),\n ('inde','inde'),\n ('indentexpr','indentexpr'),\n ('indentkeys','indentkeys'),\n ('indk','indk'),\n ('inex','inex'),\n ('inf','inf'),\n ('infercase','infercase'),\n ('inoremap','inoremap'),\n ('insertmode','insertmode'),\n ('invacd','invacd'),\n ('invai','invai'),\n ('invakm','invakm'),\n ('invallowrevins','invallowrevins'),\n ('invaltkeymap','invaltkeymap'),\n ('invanti','invanti'),\n ('invantialias','invantialias'),\n ('invar','invar'),\n ('invarab','invarab'),\n ('invarabic','invarabic'),\n ('invarabicshape','invarabicshape'),\n ('invari','invari'),\n ('invarshape','invarshape'),\n ('invautochdir','invautochdir'),\n ('invautoindent','invautoindent'),\n ('invautoread','invautoread'),\n ('invautowrite','invautowrite'),\n ('invautowriteall','invautowriteall'),\n ('invaw','invaw'),\n ('invawa','invawa'),\n ('invbackup','invbackup'),\n ('invballooneval','invballooneval'),\n ('invbeval','invbeval'),\n ('invbin','invbin'),\n ('invbinary','invbinary'),\n ('invbiosk','invbiosk'),\n ('invbioskey','invbioskey'),\n ('invbk','invbk'),\n ('invbl','invbl'),\n ('invbomb','invbomb'),\n ('invbuflisted','invbuflisted'),\n ('invcf','invcf'),\n ('invci','invci'),\n ('invcin','invcin'),\n ('invcindent','invcindent'),\n ('invcompatible','invcompatible'),\n ('invconfirm','invconfirm'),\n ('invconsk','invconsk'),\n ('invconskey','invconskey'),\n ('invcopyindent','invcopyindent'),\n ('invcp','invcp'),\n ('invcrb','invcrb'),\n ('invcscoperelative','invcscoperelative'),\n ('invcscopetag','invcscopetag'),\n ('invcscopeverbose','invcscopeverbose'),\n ('invcsre','invcsre'),\n ('invcst','invcst'),\n ('invcsverb','invcsverb'),\n ('invcuc','invcuc'),\n ('invcul','invcul'),\n ('invcursorbind','invcursorbind'),\n ('invcursorcolumn','invcursorcolumn'),\n ('invcursorline','invcursorline'),\n ('invdeco','invdeco'),\n ('invdelcombine','invdelcombine'),\n ('invdg','invdg'),\n ('invdiff','invdiff'),\n ('invdigraph','invdigraph'),\n ('invea','invea'),\n ('inveb','inveb'),\n ('inved','inved'),\n ('invedcompatible','invedcompatible'),\n ('invek','invek'),\n ('invendofline','invendofline'),\n ('inveol','inveol'),\n ('invequalalways','invequalalways'),\n ('inverrorbells','inverrorbells'),\n ('invesckeys','invesckeys'),\n ('invet','invet'),\n ('invex','invex'),\n ('invexpandtab','invexpandtab'),\n ('invexrc','invexrc'),\n ('invfen','invfen'),\n ('invfic','invfic'),\n 
('invfileignorecase','invfileignorecase'),\n ('invfk','invfk'),\n ('invfkmap','invfkmap'),\n ('invfoldenable','invfoldenable'),\n ('invgd','invgd'),\n ('invgdefault','invgdefault'),\n ('invguipty','invguipty'),\n ('invhid','invhid'),\n ('invhidden','invhidden'),\n ('invhk','invhk'),\n ('invhkmap','invhkmap'),\n ('invhkmapp','invhkmapp'),\n ('invhkp','invhkp'),\n ('invhls','invhls'),\n ('invhlsearch','invhlsearch'),\n ('invic','invic'),\n ('invicon','invicon'),\n ('invignorecase','invignorecase'),\n ('invim','invim'),\n ('invimc','invimc'),\n ('invimcmdline','invimcmdline'),\n ('invimd','invimd'),\n ('invimdisable','invimdisable'),\n ('invincsearch','invincsearch'),\n ('invinf','invinf'),\n ('invinfercase','invinfercase'),\n ('invinsertmode','invinsertmode'),\n ('invis','invis'),\n ('invjoinspaces','invjoinspaces'),\n ('invjs','invjs'),\n ('invlazyredraw','invlazyredraw'),\n ('invlbr','invlbr'),\n ('invlinebreak','invlinebreak'),\n ('invlisp','invlisp'),\n ('invlist','invlist'),\n ('invloadplugins','invloadplugins'),\n ('invlpl','invlpl'),\n ('invlz','invlz'),\n ('invma','invma'),\n ('invmacatsui','invmacatsui'),\n ('invmagic','invmagic'),\n ('invmh','invmh'),\n ('invml','invml'),\n ('invmod','invmod'),\n ('invmodeline','invmodeline'),\n ('invmodifiable','invmodifiable'),\n ('invmodified','invmodified'),\n ('invmore','invmore'),\n ('invmousef','invmousef'),\n ('invmousefocus','invmousefocus'),\n ('invmousehide','invmousehide'),\n ('invnu','invnu'),\n ('invnumber','invnumber'),\n ('invodev','invodev'),\n ('invopendevice','invopendevice'),\n ('invpaste','invpaste'),\n ('invpi','invpi'),\n ('invpreserveindent','invpreserveindent'),\n ('invpreviewwindow','invpreviewwindow'),\n ('invprompt','invprompt'),\n ('invpvw','invpvw'),\n ('invreadonly','invreadonly'),\n ('invrelativenumber','invrelativenumber'),\n ('invremap','invremap'),\n ('invrestorescreen','invrestorescreen'),\n ('invrevins','invrevins'),\n ('invri','invri'),\n ('invrightleft','invrightleft'),\n ('invrl','invrl'),\n ('invrnu','invrnu'),\n ('invro','invro'),\n ('invrs','invrs'),\n ('invru','invru'),\n ('invruler','invruler'),\n ('invsb','invsb'),\n ('invsc','invsc'),\n ('invscb','invscb'),\n ('invscrollbind','invscrollbind'),\n ('invscs','invscs'),\n ('invsecure','invsecure'),\n ('invsft','invsft'),\n ('invshellslash','invshellslash'),\n ('invshelltemp','invshelltemp'),\n ('invshiftround','invshiftround'),\n ('invshortname','invshortname'),\n ('invshowcmd','invshowcmd'),\n ('invshowfulltag','invshowfulltag'),\n ('invshowmatch','invshowmatch'),\n ('invshowmode','invshowmode'),\n ('invsi','invsi'),\n ('invsm','invsm'),\n ('invsmartcase','invsmartcase'),\n ('invsmartindent','invsmartindent'),\n ('invsmarttab','invsmarttab'),\n ('invsmd','invsmd'),\n ('invsn','invsn'),\n ('invsol','invsol'),\n ('invspell','invspell'),\n ('invsplitbelow','invsplitbelow'),\n ('invsplitright','invsplitright'),\n ('invspr','invspr'),\n ('invsr','invsr'),\n ('invssl','invssl'),\n ('invsta','invsta'),\n ('invstartofline','invstartofline'),\n ('invstmp','invstmp'),\n ('invswapfile','invswapfile'),\n ('invswf','invswf'),\n ('invta','invta'),\n ('invtagbsearch','invtagbsearch'),\n ('invtagrelative','invtagrelative'),\n ('invtagstack','invtagstack'),\n ('invtbi','invtbi'),\n ('invtbidi','invtbidi'),\n ('invtbs','invtbs'),\n ('invtermbidi','invtermbidi'),\n ('invterse','invterse'),\n ('invtextauto','invtextauto'),\n ('invtextmode','invtextmode'),\n ('invtf','invtf'),\n ('invtgst','invtgst'),\n ('invtildeop','invtildeop'),\n ('invtimeout','invtimeout'),\n 
('invtitle','invtitle'),\n ('invto','invto'),\n ('invtop','invtop'),\n ('invtr','invtr'),\n ('invttimeout','invttimeout'),\n ('invttybuiltin','invttybuiltin'),\n ('invttyfast','invttyfast'),\n ('invtx','invtx'),\n ('invudf','invudf'),\n ('invundofile','invundofile'),\n ('invvb','invvb'),\n ('invvisualbell','invvisualbell'),\n ('invwa','invwa'),\n ('invwarn','invwarn'),\n ('invwb','invwb'),\n ('invweirdinvert','invweirdinvert'),\n ('invwfh','invwfh'),\n ('invwfw','invwfw'),\n ('invwic','invwic'),\n ('invwildignorecase','invwildignorecase'),\n ('invwildmenu','invwildmenu'),\n ('invwinfixheight','invwinfixheight'),\n ('invwinfixwidth','invwinfixwidth'),\n ('invwiv','invwiv'),\n ('invwmnu','invwmnu'),\n ('invwrap','invwrap'),\n ('invwrapscan','invwrapscan'),\n ('invwrite','invwrite'),\n ('invwriteany','invwriteany'),\n ('invwritebackup','invwritebackup'),\n ('invws','invws'),\n ('is','is'),\n ('isf','isf'),\n ('isfname','isfname'),\n ('isi','isi'),\n ('isident','isident'),\n ('isk','isk'),\n ('iskeyword','iskeyword'),\n ('isp','isp'),\n ('isprint','isprint'),\n ('joinspaces','joinspaces'),\n ('js','js'),\n ('key','key'),\n ('keymap','keymap'),\n ('keymodel','keymodel'),\n ('keywordprg','keywordprg'),\n ('km','km'),\n ('kmp','kmp'),\n ('kp','kp'),\n ('langmap','langmap'),\n ('langmenu','langmenu'),\n ('laststatus','laststatus'),\n ('lazyredraw','lazyredraw'),\n ('lbr','lbr'),\n ('lcs','lcs'),\n ('linebreak','linebreak'),\n ('lines','lines'),\n ('linespace','linespace'),\n ('lisp','lisp'),\n ('lispwords','lispwords'),\n ('list','list'),\n ('listchars','listchars'),\n ('lm','lm'),\n ('lmap','lmap'),\n ('loadplugins','loadplugins'),\n ('lpl','lpl'),\n ('ls','ls'),\n ('lsp','lsp'),\n ('lw','lw'),\n ('lz','lz'),\n ('ma','ma'),\n ('macatsui','macatsui'),\n ('magic','magic'),\n ('makeef','makeef'),\n ('makeprg','makeprg'),\n ('mat','mat'),\n ('matchpairs','matchpairs'),\n ('matchtime','matchtime'),\n ('maxcombine','maxcombine'),\n ('maxfuncdepth','maxfuncdepth'),\n ('maxmapdepth','maxmapdepth'),\n ('maxmem','maxmem'),\n ('maxmempattern','maxmempattern'),\n ('maxmemtot','maxmemtot'),\n ('mco','mco'),\n ('mef','mef'),\n ('menuitems','menuitems'),\n ('mfd','mfd'),\n ('mh','mh'),\n ('mis','mis'),\n ('mkspellmem','mkspellmem'),\n ('ml','ml'),\n ('mls','mls'),\n ('mm','mm'),\n ('mmd','mmd'),\n ('mmp','mmp'),\n ('mmt','mmt'),\n ('mod','mod'),\n ('modeline','modeline'),\n ('modelines','modelines'),\n ('modifiable','modifiable'),\n ('modified','modified'),\n ('more','more'),\n ('mouse','mouse'),\n ('mousef','mousef'),\n ('mousefocus','mousefocus'),\n ('mousehide','mousehide'),\n ('mousem','mousem'),\n ('mousemodel','mousemodel'),\n ('mouses','mouses'),\n ('mouseshape','mouseshape'),\n ('mouset','mouset'),\n ('mousetime','mousetime'),\n ('mp','mp'),\n ('mps','mps'),\n ('msm','msm'),\n ('mzq','mzq'),\n ('mzquantum','mzquantum'),\n ('nf','nf'),\n ('nnoremap','nnoremap'),\n ('noacd','noacd'),\n ('noai','noai'),\n ('noakm','noakm'),\n ('noallowrevins','noallowrevins'),\n ('noaltkeymap','noaltkeymap'),\n ('noanti','noanti'),\n ('noantialias','noantialias'),\n ('noar','noar'),\n ('noarab','noarab'),\n ('noarabic','noarabic'),\n ('noarabicshape','noarabicshape'),\n ('noari','noari'),\n ('noarshape','noarshape'),\n ('noautochdir','noautochdir'),\n ('noautoindent','noautoindent'),\n ('noautoread','noautoread'),\n ('noautowrite','noautowrite'),\n ('noautowriteall','noautowriteall'),\n ('noaw','noaw'),\n ('noawa','noawa'),\n ('nobackup','nobackup'),\n ('noballooneval','noballooneval'),\n ('nobeval','nobeval'),\n 
('nobin','nobin'),\n ('nobinary','nobinary'),\n ('nobiosk','nobiosk'),\n ('nobioskey','nobioskey'),\n ('nobk','nobk'),\n ('nobl','nobl'),\n ('nobomb','nobomb'),\n ('nobuflisted','nobuflisted'),\n ('nocf','nocf'),\n ('noci','noci'),\n ('nocin','nocin'),\n ('nocindent','nocindent'),\n ('nocompatible','nocompatible'),\n ('noconfirm','noconfirm'),\n ('noconsk','noconsk'),\n ('noconskey','noconskey'),\n ('nocopyindent','nocopyindent'),\n ('nocp','nocp'),\n ('nocrb','nocrb'),\n ('nocscoperelative','nocscoperelative'),\n ('nocscopetag','nocscopetag'),\n ('nocscopeverbose','nocscopeverbose'),\n ('nocsre','nocsre'),\n ('nocst','nocst'),\n ('nocsverb','nocsverb'),\n ('nocuc','nocuc'),\n ('nocul','nocul'),\n ('nocursorbind','nocursorbind'),\n ('nocursorcolumn','nocursorcolumn'),\n ('nocursorline','nocursorline'),\n ('nodeco','nodeco'),\n ('nodelcombine','nodelcombine'),\n ('nodg','nodg'),\n ('nodiff','nodiff'),\n ('nodigraph','nodigraph'),\n ('noea','noea'),\n ('noeb','noeb'),\n ('noed','noed'),\n ('noedcompatible','noedcompatible'),\n ('noek','noek'),\n ('noendofline','noendofline'),\n ('noeol','noeol'),\n ('noequalalways','noequalalways'),\n ('noerrorbells','noerrorbells'),\n ('noesckeys','noesckeys'),\n ('noet','noet'),\n ('noex','noex'),\n ('noexpandtab','noexpandtab'),\n ('noexrc','noexrc'),\n ('nofen','nofen'),\n ('nofic','nofic'),\n ('nofileignorecase','nofileignorecase'),\n ('nofk','nofk'),\n ('nofkmap','nofkmap'),\n ('nofoldenable','nofoldenable'),\n ('nogd','nogd'),\n ('nogdefault','nogdefault'),\n ('noguipty','noguipty'),\n ('nohid','nohid'),\n ('nohidden','nohidden'),\n ('nohk','nohk'),\n ('nohkmap','nohkmap'),\n ('nohkmapp','nohkmapp'),\n ('nohkp','nohkp'),\n ('nohls','nohls'),\n ('nohlsearch','nohlsearch'),\n ('noic','noic'),\n ('noicon','noicon'),\n ('noignorecase','noignorecase'),\n ('noim','noim'),\n ('noimc','noimc'),\n ('noimcmdline','noimcmdline'),\n ('noimd','noimd'),\n ('noimdisable','noimdisable'),\n ('noincsearch','noincsearch'),\n ('noinf','noinf'),\n ('noinfercase','noinfercase'),\n ('noinsertmode','noinsertmode'),\n ('nois','nois'),\n ('nojoinspaces','nojoinspaces'),\n ('nojs','nojs'),\n ('nolazyredraw','nolazyredraw'),\n ('nolbr','nolbr'),\n ('nolinebreak','nolinebreak'),\n ('nolisp','nolisp'),\n ('nolist','nolist'),\n ('noloadplugins','noloadplugins'),\n ('nolpl','nolpl'),\n ('nolz','nolz'),\n ('noma','noma'),\n ('nomacatsui','nomacatsui'),\n ('nomagic','nomagic'),\n ('nomh','nomh'),\n ('noml','noml'),\n ('nomod','nomod'),\n ('nomodeline','nomodeline'),\n ('nomodifiable','nomodifiable'),\n ('nomodified','nomodified'),\n ('nomore','nomore'),\n ('nomousef','nomousef'),\n ('nomousefocus','nomousefocus'),\n ('nomousehide','nomousehide'),\n ('nonu','nonu'),\n ('nonumber','nonumber'),\n ('noodev','noodev'),\n ('noopendevice','noopendevice'),\n ('nopaste','nopaste'),\n ('nopi','nopi'),\n ('nopreserveindent','nopreserveindent'),\n ('nopreviewwindow','nopreviewwindow'),\n ('noprompt','noprompt'),\n ('nopvw','nopvw'),\n ('noreadonly','noreadonly'),\n ('norelativenumber','norelativenumber'),\n ('noremap','noremap'),\n ('norestorescreen','norestorescreen'),\n ('norevins','norevins'),\n ('nori','nori'),\n ('norightleft','norightleft'),\n ('norl','norl'),\n ('nornu','nornu'),\n ('noro','noro'),\n ('nors','nors'),\n ('noru','noru'),\n ('noruler','noruler'),\n ('nosb','nosb'),\n ('nosc','nosc'),\n ('noscb','noscb'),\n ('noscrollbind','noscrollbind'),\n ('noscs','noscs'),\n ('nosecure','nosecure'),\n ('nosft','nosft'),\n ('noshellslash','noshellslash'),\n ('noshelltemp','noshelltemp'),\n 
('noshiftround','noshiftround'),\n ('noshortname','noshortname'),\n ('noshowcmd','noshowcmd'),\n ('noshowfulltag','noshowfulltag'),\n ('noshowmatch','noshowmatch'),\n ('noshowmode','noshowmode'),\n ('nosi','nosi'),\n ('nosm','nosm'),\n ('nosmartcase','nosmartcase'),\n ('nosmartindent','nosmartindent'),\n ('nosmarttab','nosmarttab'),\n ('nosmd','nosmd'),\n ('nosn','nosn'),\n ('nosol','nosol'),\n ('nospell','nospell'),\n ('nosplitbelow','nosplitbelow'),\n ('nosplitright','nosplitright'),\n ('nospr','nospr'),\n ('nosr','nosr'),\n ('nossl','nossl'),\n ('nosta','nosta'),\n ('nostartofline','nostartofline'),\n ('nostmp','nostmp'),\n ('noswapfile','noswapfile'),\n ('noswf','noswf'),\n ('nota','nota'),\n ('notagbsearch','notagbsearch'),\n ('notagrelative','notagrelative'),\n ('notagstack','notagstack'),\n ('notbi','notbi'),\n ('notbidi','notbidi'),\n ('notbs','notbs'),\n ('notermbidi','notermbidi'),\n ('noterse','noterse'),\n ('notextauto','notextauto'),\n ('notextmode','notextmode'),\n ('notf','notf'),\n ('notgst','notgst'),\n ('notildeop','notildeop'),\n ('notimeout','notimeout'),\n ('notitle','notitle'),\n ('noto','noto'),\n ('notop','notop'),\n ('notr','notr'),\n ('nottimeout','nottimeout'),\n ('nottybuiltin','nottybuiltin'),\n ('nottyfast','nottyfast'),\n ('notx','notx'),\n ('noudf','noudf'),\n ('noundofile','noundofile'),\n ('novb','novb'),\n ('novisualbell','novisualbell'),\n ('nowa','nowa'),\n ('nowarn','nowarn'),\n ('nowb','nowb'),\n ('noweirdinvert','noweirdinvert'),\n ('nowfh','nowfh'),\n ('nowfw','nowfw'),\n ('nowic','nowic'),\n ('nowildignorecase','nowildignorecase'),\n ('nowildmenu','nowildmenu'),\n ('nowinfixheight','nowinfixheight'),\n ('nowinfixwidth','nowinfixwidth'),\n ('nowiv','nowiv'),\n ('nowmnu','nowmnu'),\n ('nowrap','nowrap'),\n ('nowrapscan','nowrapscan'),\n ('nowrite','nowrite'),\n ('nowriteany','nowriteany'),\n ('nowritebackup','nowritebackup'),\n ('nows','nows'),\n ('nrformats','nrformats'),\n ('nu','nu'),\n ('number','number'),\n ('numberwidth','numberwidth'),\n ('nuw','nuw'),\n ('odev','odev'),\n ('oft','oft'),\n ('ofu','ofu'),\n ('omnifunc','omnifunc'),\n ('opendevice','opendevice'),\n ('operatorfunc','operatorfunc'),\n ('opfunc','opfunc'),\n ('osfiletype','osfiletype'),\n ('pa','pa'),\n ('para','para'),\n ('paragraphs','paragraphs'),\n ('paste','paste'),\n ('pastetoggle','pastetoggle'),\n ('patchexpr','patchexpr'),\n ('patchmode','patchmode'),\n ('path','path'),\n ('pdev','pdev'),\n ('penc','penc'),\n ('pex','pex'),\n ('pexpr','pexpr'),\n ('pfn','pfn'),\n ('ph','ph'),\n ('pheader','pheader'),\n ('pi','pi'),\n ('pm','pm'),\n ('pmbcs','pmbcs'),\n ('pmbfn','pmbfn'),\n ('popt','popt'),\n ('preserveindent','preserveindent'),\n ('previewheight','previewheight'),\n ('previewwindow','previewwindow'),\n ('printdevice','printdevice'),\n ('printencoding','printencoding'),\n ('printexpr','printexpr'),\n ('printfont','printfont'),\n ('printheader','printheader'),\n ('printmbcharset','printmbcharset'),\n ('printmbfont','printmbfont'),\n ('printoptions','printoptions'),\n ('prompt','prompt'),\n ('pt','pt'),\n ('pumheight','pumheight'),\n ('pvh','pvh'),\n ('pvw','pvw'),\n ('qe','qe'),\n ('quoteescape','quoteescape'),\n ('rdt','rdt'),\n ('re','re'),\n ('readonly','readonly'),\n ('redrawtime','redrawtime'),\n ('regexpengine','regexpengine'),\n ('relativenumber','relativenumber'),\n ('remap','remap'),\n ('report','report'),\n ('restorescreen','restorescreen'),\n ('revins','revins'),\n ('ri','ri'),\n ('rightleft','rightleft'),\n ('rightleftcmd','rightleftcmd'),\n ('rl','rl'),\n 
('rlc','rlc'),\n ('rnu','rnu'),\n ('ro','ro'),\n ('rs','rs'),\n ('rtp','rtp'),\n ('ru','ru'),\n ('ruf','ruf'),\n ('ruler','ruler'),\n ('rulerformat','rulerformat'),\n ('runtimepath','runtimepath'),\n ('sb','sb'),\n ('sbo','sbo'),\n ('sbr','sbr'),\n ('sc','sc'),\n ('scb','scb'),\n ('scr','scr'),\n ('scroll','scroll'),\n ('scrollbind','scrollbind'),\n ('scrolljump','scrolljump'),\n ('scrolloff','scrolloff'),\n ('scrollopt','scrollopt'),\n ('scs','scs'),\n ('sect','sect'),\n ('sections','sections'),\n ('secure','secure'),\n ('sel','sel'),\n ('selection','selection'),\n ('selectmode','selectmode'),\n ('sessionoptions','sessionoptions'),\n ('sft','sft'),\n ('sh','sh'),\n ('shcf','shcf'),\n ('shell','shell'),\n ('shellcmdflag','shellcmdflag'),\n ('shellpipe','shellpipe'),\n ('shellquote','shellquote'),\n ('shellredir','shellredir'),\n ('shellslash','shellslash'),\n ('shelltemp','shelltemp'),\n ('shelltype','shelltype'),\n ('shellxescape','shellxescape'),\n ('shellxquote','shellxquote'),\n ('shiftround','shiftround'),\n ('shiftwidth','shiftwidth'),\n ('shm','shm'),\n ('shortmess','shortmess'),\n ('shortname','shortname'),\n ('showbreak','showbreak'),\n ('showcmd','showcmd'),\n ('showfulltag','showfulltag'),\n ('showmatch','showmatch'),\n ('showmode','showmode'),\n ('showtabline','showtabline'),\n ('shq','shq'),\n ('si','si'),\n ('sidescroll','sidescroll'),\n ('sidescrolloff','sidescrolloff'),\n ('siso','siso'),\n ('sj','sj'),\n ('slm','slm'),\n ('sm','sm'),\n ('smartcase','smartcase'),\n ('smartindent','smartindent'),\n ('smarttab','smarttab'),\n ('smc','smc'),\n ('smd','smd'),\n ('sn','sn'),\n ('so','so'),\n ('softtabstop','softtabstop'),\n ('sol','sol'),\n ('sp','sp'),\n ('spc','spc'),\n ('spell','spell'),\n ('spellcapcheck','spellcapcheck'),\n ('spellfile','spellfile'),\n ('spelllang','spelllang'),\n ('spellsuggest','spellsuggest'),\n ('spf','spf'),\n ('spl','spl'),\n ('splitbelow','splitbelow'),\n ('splitright','splitright'),\n ('spr','spr'),\n ('sps','sps'),\n ('sr','sr'),\n ('srr','srr'),\n ('ss','ss'),\n ('ssl','ssl'),\n ('ssop','ssop'),\n ('st','st'),\n ('sta','sta'),\n ('stal','stal'),\n ('startofline','startofline'),\n ('statusline','statusline'),\n ('stl','stl'),\n ('stmp','stmp'),\n ('sts','sts'),\n ('su','su'),\n ('sua','sua'),\n ('suffixes','suffixes'),\n ('suffixesadd','suffixesadd'),\n ('sw','sw'),\n ('swapfile','swapfile'),\n ('swapsync','swapsync'),\n ('swb','swb'),\n ('swf','swf'),\n ('switchbuf','switchbuf'),\n ('sws','sws'),\n ('sxe','sxe'),\n ('sxq','sxq'),\n ('syn','syn'),\n ('synmaxcol','synmaxcol'),\n ('syntax','syntax'),\n ('t_AB','t_AB'),\n ('t_AF','t_AF'),\n ('t_AL','t_AL'),\n ('t_CS','t_CS'),\n ('t_CV','t_CV'),\n ('t_Ce','t_Ce'),\n ('t_Co','t_Co'),\n ('t_Cs','t_Cs'),\n ('t_DL','t_DL'),\n ('t_EI','t_EI'),\n ('t_F1','t_F1'),\n ('t_F2','t_F2'),\n ('t_F3','t_F3'),\n ('t_F4','t_F4'),\n ('t_F5','t_F5'),\n ('t_F6','t_F6'),\n ('t_F7','t_F7'),\n ('t_F8','t_F8'),\n ('t_F9','t_F9'),\n ('t_IE','t_IE'),\n ('t_IS','t_IS'),\n ('t_K1','t_K1'),\n ('t_K3','t_K3'),\n ('t_K4','t_K4'),\n ('t_K5','t_K5'),\n ('t_K6','t_K6'),\n ('t_K7','t_K7'),\n ('t_K8','t_K8'),\n ('t_K9','t_K9'),\n ('t_KA','t_KA'),\n ('t_KB','t_KB'),\n ('t_KC','t_KC'),\n ('t_KD','t_KD'),\n ('t_KE','t_KE'),\n ('t_KF','t_KF'),\n ('t_KG','t_KG'),\n ('t_KH','t_KH'),\n ('t_KI','t_KI'),\n ('t_KJ','t_KJ'),\n ('t_KK','t_KK'),\n ('t_KL','t_KL'),\n ('t_RI','t_RI'),\n ('t_RV','t_RV'),\n ('t_SI','t_SI'),\n ('t_Sb','t_Sb'),\n ('t_Sf','t_Sf'),\n ('t_WP','t_WP'),\n ('t_WS','t_WS'),\n ('t_ZH','t_ZH'),\n ('t_ZR','t_ZR'),\n 
('t_al','t_al'),\n ('t_bc','t_bc'),\n ('t_cd','t_cd'),\n ('t_ce','t_ce'),\n ('t_cl','t_cl'),\n ('t_cm','t_cm'),\n ('t_cs','t_cs'),\n ('t_da','t_da'),\n ('t_db','t_db'),\n ('t_dl','t_dl'),\n ('t_fs','t_fs'),\n ('t_k1','t_k1'),\n ('t_k2','t_k2'),\n ('t_k3','t_k3'),\n ('t_k4','t_k4'),\n ('t_k5','t_k5'),\n ('t_k6','t_k6'),\n ('t_k7','t_k7'),\n ('t_k8','t_k8'),\n ('t_k9','t_k9'),\n ('t_kB','t_kB'),\n ('t_kD','t_kD'),\n ('t_kI','t_kI'),\n ('t_kN','t_kN'),\n ('t_kP','t_kP'),\n ('t_kb','t_kb'),\n ('t_kd','t_kd'),\n ('t_ke','t_ke'),\n ('t_kh','t_kh'),\n ('t_kl','t_kl'),\n ('t_kr','t_kr'),\n ('t_ks','t_ks'),\n ('t_ku','t_ku'),\n ('t_le','t_le'),\n ('t_mb','t_mb'),\n ('t_md','t_md'),\n ('t_me','t_me'),\n ('t_mr','t_mr'),\n ('t_ms','t_ms'),\n ('t_nd','t_nd'),\n ('t_op','t_op'),\n ('t_se','t_se'),\n ('t_so','t_so'),\n ('t_sr','t_sr'),\n ('t_te','t_te'),\n ('t_ti','t_ti'),\n ('t_ts','t_ts'),\n ('t_u7','t_u7'),\n ('t_ue','t_ue'),\n ('t_us','t_us'),\n ('t_ut','t_ut'),\n ('t_vb','t_vb'),\n ('t_ve','t_ve'),\n ('t_vi','t_vi'),\n ('t_vs','t_vs'),\n ('t_xs','t_xs'),\n ('ta','ta'),\n ('tabline','tabline'),\n ('tabpagemax','tabpagemax'),\n ('tabstop','tabstop'),\n ('tag','tag'),\n ('tagbsearch','tagbsearch'),\n ('taglength','taglength'),\n ('tagrelative','tagrelative'),\n ('tags','tags'),\n ('tagstack','tagstack'),\n ('tal','tal'),\n ('tb','tb'),\n ('tbi','tbi'),\n ('tbidi','tbidi'),\n ('tbis','tbis'),\n ('tbs','tbs'),\n ('tenc','tenc'),\n ('term','term'),\n ('termbidi','termbidi'),\n ('termencoding','termencoding'),\n ('terse','terse'),\n ('textauto','textauto'),\n ('textmode','textmode'),\n ('textwidth','textwidth'),\n ('tf','tf'),\n ('tgst','tgst'),\n ('thesaurus','thesaurus'),\n ('tildeop','tildeop'),\n ('timeout','timeout'),\n ('timeoutlen','timeoutlen'),\n ('title','title'),\n ('titlelen','titlelen'),\n ('titleold','titleold'),\n ('titlestring','titlestring'),\n ('tl','tl'),\n ('tm','tm'),\n ('to','to'),\n ('toolbar','toolbar'),\n ('toolbariconsize','toolbariconsize'),\n ('top','top'),\n ('tpm','tpm'),\n ('tr','tr'),\n ('ts','ts'),\n ('tsl','tsl'),\n ('tsr','tsr'),\n ('ttimeout','ttimeout'),\n ('ttimeoutlen','ttimeoutlen'),\n ('ttm','ttm'),\n ('tty','tty'),\n ('ttybuiltin','ttybuiltin'),\n ('ttyfast','ttyfast'),\n ('ttym','ttym'),\n ('ttymouse','ttymouse'),\n ('ttyscroll','ttyscroll'),\n ('ttytype','ttytype'),\n ('tw','tw'),\n ('tx','tx'),\n ('uc','uc'),\n ('udf','udf'),\n ('udir','udir'),\n ('ul','ul'),\n ('undodir','undodir'),\n ('undofile','undofile'),\n ('undolevels','undolevels'),\n ('undoreload','undoreload'),\n ('updatecount','updatecount'),\n ('updatetime','updatetime'),\n ('ur','ur'),\n ('ut','ut'),\n ('vb','vb'),\n ('vbs','vbs'),\n ('vdir','vdir'),\n ('ve','ve'),\n ('verbose','verbose'),\n ('verbosefile','verbosefile'),\n ('vfile','vfile'),\n ('vi','vi'),\n ('viewdir','viewdir'),\n ('viewoptions','viewoptions'),\n ('viminfo','viminfo'),\n ('virtualedit','virtualedit'),\n ('visualbell','visualbell'),\n ('vnoremap','vnoremap'),\n ('vop','vop'),\n ('wa','wa'),\n ('wak','wak'),\n ('warn','warn'),\n ('wb','wb'),\n ('wc','wc'),\n ('wcm','wcm'),\n ('wd','wd'),\n ('weirdinvert','weirdinvert'),\n ('wfh','wfh'),\n ('wfw','wfw'),\n ('wh','wh'),\n ('whichwrap','whichwrap'),\n ('wi','wi'),\n ('wic','wic'),\n ('wig','wig'),\n ('wildchar','wildchar'),\n ('wildcharm','wildcharm'),\n ('wildignore','wildignore'),\n ('wildignorecase','wildignorecase'),\n ('wildmenu','wildmenu'),\n ('wildmode','wildmode'),\n ('wildoptions','wildoptions'),\n ('wim','wim'),\n ('winaltkeys','winaltkeys'),\n ('window','window'),\n 
('winfixheight','winfixheight'),\n ('winfixwidth','winfixwidth'),\n ('winheight','winheight'),\n ('winminheight','winminheight'),\n ('winminwidth','winminwidth'),\n ('winwidth','winwidth'),\n ('wiv','wiv'),\n ('wiw','wiw'),\n ('wm','wm'),\n ('wmh','wmh'),\n ('wmnu','wmnu'),\n ('wmw','wmw'),\n ('wop','wop'),\n ('wrap','wrap'),\n ('wrapmargin','wrapmargin'),\n ('wrapscan','wrapscan'),\n ('write','write'),\n ('writeany','writeany'),\n ('writebackup','writebackup'),\n ('writedelay','writedelay'),\n ('ws','ws'),\n ('ww','ww'),\n )\n return var\noption = _getoption()\n\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2730,"cells":{"repo_name":{"kind":"string","value":"tudorvio/nova"},"path":{"kind":"string","value":"nova/api/openstack/compute/schemas/v3/image_metadata.py"},"copies":{"kind":"string","value":"95"},"size":{"kind":"string","value":"1177"},"content":{"kind":"string","value":"# Copyright 2014 IBM Corporation. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom nova.api.validation import parameter_types\n\n\ncreate = {\n 'type': 'object',\n 'properties': {\n 'metadata': parameter_types.metadata\n },\n 'required': ['metadata'],\n 'additionalProperties': False,\n}\n\nsingle_metadata = copy.deepcopy(parameter_types.metadata)\nsingle_metadata.update({\n 'minProperties': 1,\n 'maxProperties': 1\n})\n\nupdate = {\n 'type': 'object',\n 'properties': {\n 'meta': single_metadata\n },\n 'required': ['meta'],\n 'additionalProperties': False,\n}\n\nupdate_all = create\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2731,"cells":{"repo_name":{"kind":"string","value":"PHOTOX/fuase"},"path":{"kind":"string","value":"ase/ase/dft/pars_beefvdw.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"23666"},"content":{"kind":"string","value":"import numpy as np\n\n\"\"\"\nBEEF-vdW ensemble matrix\n\"\"\"\nuiOmega = np.array([\n[ 9.238289896663336e-02 , 1.573812432079919e-01 , 1.029935738540308e-01 , 1.366003143143216e-02 , -2.170819634832974e-02 , -1.971473025898487e-03 , 6.694499988752175e-03 , -1.436837103528228e-03 , -1.894288263659829e-03 , 1.620730202731354e-03 , 3.342742083591797e-05 , -8.935288190655010e-04 , 5.660396510944252e-04 , 1.092640880494036e-04 , -3.909536572033999e-04 , 2.271387694573118e-04 , 4.720081507064245e-05 , -1.728805247746040e-04 , 1.161095890105822e-04 , 1.632569772443308e-05 , -9.505329207480296e-05 , 5.966357079138161e-05 , 3.909940118293563e-05 , -9.094078397503243e-05 , 3.979403197298154e-05 , 5.883724662690913e-05 , -8.868728142026543e-05 , 1.649195968392651e-05 , 3.986378541237102e-05 , -2.080734204109696e-05 , -5.210020320050114e-02 ],\n[ 1.573812432080020e-01 , 3.194503568212250e-01 , 2.330350019456029e-01 , 3.539526885754365e-02 , -4.398162505429017e-02 , -7.870052015456349e-03 , 1.288386845762548e-02 , -1.452985165647521e-03 , -3.414852982913958e-03 , 2.242106483095301e-03 , 2.411666744826487e-04 , -1.065238741066354e-03 , 4.135880276069384e-04 , 2.536775346693924e-04 , 
-2.530397572915468e-04 , -5.690638693892032e-05 , 1.673844673999724e-04 , -9.944997873568069e-06 , -1.718953440120930e-04 , 1.760399953825598e-04 , -4.156338135631344e-06 , -1.832004402941794e-04 , 2.147464735651294e-04 , -6.193272093284920e-05 , -1.319710553323893e-04 , 1.948452573660156e-04 , -5.101630490846988e-05 , -9.176394513865211e-05 , 4.717722996545362e-05 , 7.111249931485782e-06 , -1.890906559696380e-02 ],\n[ 1.029935738540465e-01 , 2.330350019456185e-01 , 1.906771663140688e-01 , 4.596131842244390e-02 , -2.792908137436464e-02 , -1.240232492150593e-02 , 5.672917933168648e-03 , 1.434385697982085e-03 , -9.455904542077782e-04 , 3.036359098459168e-05 , 1.161828188486106e-04 , 7.937359374341367e-05 , -1.452498186750268e-04 , 1.384058476815110e-05 , 1.530299855805981e-04 , -1.908370243275392e-04 , 5.614920168522352e-05 , 1.448595900033545e-04 , -2.366731351667913e-04 , 1.303628937641542e-04 , 8.403491035544659e-05 , -2.162539474930004e-04 , 1.579894933576654e-04 , 1.853443013110853e-05 , -1.453365923716440e-04 , 1.270119640983266e-04 , 1.393651877686879e-05 , -8.735349638010247e-05 , 1.562163815156337e-05 , 1.819382613180743e-05 , 1.382668594717776e-02 ],\n[ 1.366003143144247e-02 , 3.539526885755911e-02 , 4.596131842245237e-02 , 3.412600355844948e-02 , 5.788002236623282e-03 , -9.314441356035262e-03 , -5.276305980529734e-03 , 2.351769282262449e-03 , 1.746899840570664e-03 , -1.053810170761046e-03 , -2.902616086744972e-04 , 5.752547360555607e-04 , -8.857003353891879e-05 , -2.395794347875841e-04 , 1.413569388536142e-04 , 5.605747482892052e-05 , -9.488998643296934e-05 , 2.026963310534137e-05 , 3.772638762355388e-05 , -4.067190865485931e-05 , 1.321492117521963e-05 , 1.940880629107831e-05 , -3.480998018498056e-05 , 1.778991053651829e-05 , 1.586887875776044e-05 , -3.017037178432038e-05 , 6.647594986708508e-06 , 1.545376441325688e-05 , -5.578313586587479e-06 , -2.498675358121092e-06 , -7.076421937394695e-03 ],\n[ -2.170819634832771e-02 , -4.398162505428508e-02 , -2.792908137435959e-02 , 5.788002236625639e-03 , 1.599472206930952e-02 , 1.608917143245890e-03 , -5.597384471167169e-03 , -1.499164748509191e-03 , 1.031475806000458e-03 , 5.332996506181574e-04 , -2.489713532023827e-04 , -1.029965243518429e-04 , 1.699409468310518e-04 , -5.189717276078564e-05 , -6.126197146900113e-05 , 8.454620554637730e-05 , -2.898403340456230e-05 , -4.695866195676658e-05 , 7.705234549813160e-05 , -3.658438803802928e-05 , -3.319317982415972e-05 , 6.573717163798472e-05 , -3.698152620572900e-05 , -1.629294629181860e-05 , 4.241341573520274e-05 , -2.624727597577873e-05 , -1.229090821564833e-05 , 2.348090332681114e-05 , -2.215657597169080e-07 , -6.444872622959645e-06 , 7.322667111791249e-04 ],\n[ -1.971473025900972e-03 , -7.870052015460869e-03 , -1.240232492150907e-02 , -9.314441356035836e-03 , 1.608917143246348e-03 , 7.634754660592785e-03 , 2.015667017611551e-03 , -3.623574339977459e-03 , -1.474755821692741e-03 , 1.127995802260326e-03 , 4.639737083120432e-04 , -4.567637545650261e-04 , -2.016876766012911e-05 , 2.508509815496272e-04 , -1.147671414054848e-04 , -7.415040892571524e-05 , 9.932046149486572e-05 , -1.325820303664777e-05 , -5.028147494244732e-05 , 4.435536803388949e-05 , -2.227553213442618e-06 , -3.139708798837062e-05 , 3.307650446358692e-05 , -6.558671845195734e-06 , -2.123041867524418e-05 , 2.397646436678162e-05 , 9.138618011606733e-07 , -1.527849014454442e-05 , 2.261408120954423e-06 , 3.617283769859004e-06 , 2.325697711871941e-03 ],\n[ 6.694499988750638e-03 , 1.288386845762195e-02 , 5.672917933165492e-03 , 
-5.276305980530938e-03 , -5.597384471167074e-03 , 2.015667017611739e-03 , 4.377508336814056e-03 , 4.100359917331289e-04 , -1.876150671093797e-03 , -7.271917289430953e-04 , 4.632933527994722e-04 , 2.963398987389869e-04 , -1.506945170950558e-04 , -5.149346314745077e-05 , 9.215110292974351e-05 , -3.132804577761338e-05 , -2.100641270393858e-05 , 3.506730172274297e-05 , -2.465494126635098e-05 , 1.240900749825681e-06 , 2.076535734347166e-05 , -2.285062874633954e-05 , 4.208354769194986e-06 , 1.425348474305690e-05 , -1.526811061895161e-05 , 3.047660598079506e-06 , 9.299255727538788e-06 , -8.183025849838069e-06 , -2.016271133614633e-06 , 3.118202698102115e-06 , -1.983005807705875e-03 ],\n[ -1.436837103527614e-03 , -1.452985165646303e-03 , 1.434385697983009e-03 , 2.351769282262657e-03 , -1.499164748509336e-03 , -3.623574339977513e-03 , 4.100359917331572e-04 , 3.388139698932502e-03 , 4.194131188659545e-04 , -1.640686728848097e-03 , -4.535159587025243e-04 , 5.155942974268080e-04 , 1.219637950738874e-04 , -1.881362361335498e-04 , 5.406677887798438e-05 , 6.730117550948196e-05 , -6.826604522477651e-05 , -7.600076704978491e-08 , 4.545041141091276e-05 , -3.434406804211548e-05 , -5.396753498031206e-06 , 3.160900890445868e-05 , -2.489184945477622e-05 , -2.480536094745677e-06 , 2.230938441981598e-05 , -1.767486060639981e-05 , -6.845063675872953e-06 , 1.581526117380142e-05 , 2.198506926484949e-07 , -4.837425950871762e-06 , -2.819410239268639e-05 ],\n[ -1.894288263659430e-03 , -3.414852982912986e-03 , -9.455904542068480e-04 , 1.746899840571073e-03 , 1.031475806000471e-03 , -1.474755821692797e-03 , -1.876150671093806e-03 , 4.194131188659666e-04 , 2.016821929004358e-03 , 2.913183096117767e-04 , -1.031831612901280e-03 , -3.523961692265613e-04 , 3.020345263188065e-04 , 1.358462914820522e-04 , -1.115872186939481e-04 , 4.093795217439325e-06 , 4.590005891560275e-05 , -2.788695451888706e-05 , -4.445454868386084e-06 , 1.774618276396958e-05 , -1.122137909788981e-05 , -3.231227423595720e-06 , 1.210473810098234e-05 , -7.926468935313864e-06 , -3.432017428898823e-06 , 8.827938351713780e-06 , -2.192391060027345e-06 , -4.171466247118773e-06 , 1.331053824099077e-06 , 8.121122753847691e-07 , 1.468573793837378e-03 ],\n[ 1.620730202730968e-03 , 2.242106483094428e-03 , 3.036359098381830e-05 , -1.053810170761330e-03 , 5.332996506181955e-04 , 1.127995802260379e-03 , -7.271917289430953e-04 , -1.640686728848104e-03 , 2.913183096117794e-04 , 1.618640260028683e-03 , 1.578833514403573e-04 , -8.684832913376226e-04 , -1.835212360942493e-04 , 2.681276727854413e-04 , 3.285354767345348e-05 , -7.506050741939204e-05 , 4.030911032027864e-05 , 1.270499721233960e-05 , -3.550009040339185e-05 , 2.093845130027192e-05 , 6.936412133339431e-06 , -2.092061019101916e-05 , 1.263627438389547e-05 , 5.132905197400893e-06 , -1.410173385828192e-05 , 8.068421998377687e-06 , 6.590533164499491e-06 , -9.628875957888051e-06 , -1.186884523575427e-06 , 3.379003341108947e-06 , -1.318935000558665e-03 ],\n[ 3.342742083582248e-05 , 2.411666744824321e-04 , 1.161828188484188e-04 , -2.902616086745682e-04 , -2.489713532023758e-04 , 4.639737083120528e-04 , 4.632933527994702e-04 , -4.535159587025258e-04 , -1.031831612901280e-03 , 1.578833514403571e-04 , 1.126887798536041e-03 , 1.596306400901984e-04 , -6.262219982793480e-04 , -1.832949555936158e-04 , 2.062011811517906e-04 , 5.639579837834072e-05 , -7.429445085205222e-05 , 1.947674856272851e-05 , 2.925850101283131e-05 , -3.392404367734551e-05 , 7.606268115327377e-06 , 1.774935646371143e-05 , -2.076809415497982e-05 , 
3.678275105655822e-06 , 1.351664987117452e-05 , -1.391917758734145e-05 , -3.264922954751679e-06 , 1.128720431864021e-05 , -1.552278484090616e-07 , -3.464691582178041e-06 , 2.259380952893320e-04 ],\n[ -8.935288190652161e-04 , -1.065238741065750e-03 , 7.937359374391768e-05 , 5.752547360557256e-04 , -1.029965243518811e-04 , -4.567637545650542e-04 , 2.963398987389943e-04 , 5.155942974268113e-04 , -3.523961692265653e-04 , -8.684832913376213e-04 , 1.596306400901987e-04 , 9.274502975544414e-04 , 4.771446682359326e-05 , -5.007069662988802e-04 , -7.942270207742560e-05 , 1.322450571128168e-04 , 2.441262913064850e-05 , -2.756468125262591e-05 , 6.943645566973078e-06 , 1.041750480940249e-05 , -1.187862037244014e-05 , 1.702364109770825e-06 , 7.400825614557900e-06 , -6.767501859886680e-06 , -7.456805310854244e-07 , 5.695968329623519e-06 , -2.204234030240727e-06 , -2.458146094280224e-06 , 1.077364537604088e-06 , 4.312391512705764e-07 , 5.884326361165565e-04 ],\n[ 5.660396510942980e-04 , 4.135880276066762e-04 , -1.452498186752349e-04 , -8.857003353897563e-05 , 1.699409468310743e-04 , -2.016876766011903e-05 , -1.506945170950608e-04 , 1.219637950738874e-04 , 3.020345263188087e-04 , -1.835212360942504e-04 , -6.262219982793482e-04 , 4.771446682359360e-05 , 7.353511125371758e-04 , 8.054171359132859e-05 , -4.354044149858314e-04 , -6.575758219487838e-05 , 1.322779340443631e-04 , 4.893233447412187e-06 , -2.860359932846397e-05 , 1.985815168274937e-05 , 1.407122212777636e-06 , -1.355631776270834e-05 , 9.804336837952511e-06 , 1.705077595669618e-06 , -8.448838581047592e-06 , 5.271239541237292e-06 , 3.753161433794400e-06 , -5.679341230392703e-06 , -7.297839478992945e-07 , 1.996414791054073e-06 , -5.689656491774725e-04 ],\n[ 1.092640880493588e-04 , 2.536775346692864e-04 , 1.384058476804722e-05 , -2.395794347876363e-04 , -5.189717276079290e-05 , 2.508509815496312e-04 , -5.149346314745000e-05 , -1.881362361335514e-04 , 1.358462914820523e-04 , 2.681276727854418e-04 , -1.832949555936157e-04 , -5.007069662988805e-04 , 8.054171359132875e-05 , 5.670985721529502e-04 , 4.105350281394086e-05 , -3.243779076268346e-04 , -5.693079967475888e-05 , 9.476238507687856e-05 , 1.671992883730651e-05 , -2.625490072653236e-05 , 1.094711235171939e-05 , 8.092095182176009e-06 , -1.368592923368957e-05 , 4.725521343618847e-06 , 6.462723202671019e-06 , -8.176454311340966e-06 , -1.037965911726869e-06 , 5.963104944027835e-06 , -2.287646204875769e-07 , -1.804397982061943e-06 , 6.675499678278738e-05 ],\n[ -3.909536572033257e-04 , -2.530397572913827e-04 , 1.530299855807417e-04 , 1.413569388536693e-04 , -6.126197146900289e-05 , -1.147671414054899e-04 , 9.215110292974495e-05 , 5.406677887798494e-05 , -1.115872186939490e-04 , 3.285354767345385e-05 , 2.062011811517907e-04 , -7.942270207742549e-05 , -4.354044149858315e-04 , 4.105350281394089e-05 , 5.023053531078210e-04 , 1.395753202566780e-05 , -2.794248341066854e-04 , -2.462616877967573e-05 , 7.014950575686348e-05 , 7.678983396148418e-06 , -1.200073137869544e-05 , 4.735853628377502e-06 , 3.823008200476699e-06 , -5.632608045337210e-06 , 1.401726052082347e-06 , 2.631914429094741e-06 , -1.879900165857796e-06 , -6.802392260490853e-07 , 6.412891565621652e-07 , 5.793723170821993e-08 , 2.979440856739876e-04 ],\n[ 2.271387694572524e-04 , -5.690638693903491e-05 , -1.908370243276230e-04 , 5.605747482890452e-05 , 8.454620554639201e-05 , -7.415040892571150e-05 , -3.132804577761707e-05 , 6.730117550948228e-05 , 4.093795217440853e-06 , -7.506050741939299e-05 , 5.639579837834042e-05 , 1.322450571128173e-04 , 
-6.575758219487839e-05 , -3.243779076268348e-04 , 1.395753202566789e-05 , 4.086277915281374e-04 , 2.438181614175771e-05 , -2.406201469878970e-04 , -2.063418073175250e-05 , 6.468348516289834e-05 , 1.651842998945461e-06 , -1.016330205472771e-05 , 7.380837404491689e-06 , 7.876901704903023e-07 , -5.693055610174383e-06 , 3.898194171094561e-06 , 1.890193310260514e-06 , -3.494268997347222e-06 , -2.097250054628417e-07 , 1.107934512468949e-06 , -2.578053969849174e-04 ],\n[ 4.720081507065945e-05 , 1.673844673999971e-04 , 5.614920168523253e-05 , -9.488998643297809e-05 , -2.898403340457248e-05 , 9.932046149486507e-05 , -2.100641270393638e-05 , -6.826604522477717e-05 , 4.590005891560220e-05 , 4.030911032027912e-05 , -7.429445085205212e-05 , 2.441262913064812e-05 , 1.322779340443633e-04 , -5.693079967475883e-05 , -2.794248341066855e-04 , 2.438181614175779e-05 , 3.367003211899217e-04 , 1.421493027932063e-05 , -1.961053122230117e-04 , -1.831760815509797e-05 , 5.249705849097755e-05 , 4.009767661794436e-06 , -9.222615132968448e-06 , 4.447935971545765e-06 , 2.844605015203588e-06 , -4.927439995523699e-06 , 2.779858179450743e-07 , 2.890920446156232e-06 , -3.536840533005166e-07 , -7.989052895188473e-07 , -2.873774500946350e-05 ],\n[ -1.728805247745767e-04 , -9.944997873510153e-06 , 1.448595900034050e-04 , 2.026963310536173e-05 , -4.695866195676680e-05 , -1.325820303664937e-05 , 3.506730172274367e-05 , -7.600076704937241e-08 , -2.788695451888763e-05 , 1.270499721233979e-05 , 1.947674856272868e-05 , -2.756468125262590e-05 , 4.893233447412072e-06 , 9.476238507687867e-05 , -2.462616877967574e-05 , -2.406201469878971e-04 , 1.421493027932067e-05 , 2.919803798609199e-04 , 7.292181033176667e-06 , -1.680274842794751e-04 , -1.103641130738799e-05 , 4.275283346882578e-05 , 1.839573029824585e-06 , -5.092906646915116e-06 , 2.996296133918005e-06 , 5.026786485483826e-07 , -1.803524706078249e-06 , 7.612853881615933e-07 , 3.175194859018497e-07 , -2.524196216716103e-07 , 2.671139718648832e-04 ],\n[ 1.161095890105204e-04 , -1.718953440122134e-04 , -2.366731351668826e-04 , 3.772638762353110e-05 , 7.705234549814230e-05 , -5.028147494244480e-05 , -2.465494126635465e-05 , 4.545041141091324e-05 , -4.445454868384867e-06 , -3.550009040339265e-05 , 2.925850101283112e-05 , 6.943645566973460e-06 , -2.860359932846412e-05 , 1.671992883730641e-05 , 7.014950575686358e-05 , -2.063418073175254e-05 , -1.961053122230117e-04 , 7.292181033176704e-06 , 2.476672606367232e-04 , 8.122604369362667e-06 , -1.452133704846186e-04 , -9.497391478575562e-06 , 3.809665940899583e-05 , 1.059672833862896e-06 , -5.566702444135148e-06 , 4.241342392780321e-06 , 1.125163314158913e-06 , -3.300826353062116e-06 , 2.381295916739009e-07 , 8.492464195141368e-07 , -2.789569803656198e-04 ],\n[ 1.632569772446249e-05 , 1.760399953826087e-04 , 1.303628937641828e-04 , -4.067190865486029e-05 , -3.658438803803874e-05 , 4.435536803388934e-05 , 1.240900749828609e-06 , -3.434406804211623e-05 , 1.774618276396873e-05 , 2.093845130027264e-05 , -3.392404367734537e-05 , 1.041750480940207e-05 , 1.985815168274956e-05 , -2.625490072653231e-05 , 7.678983396148288e-06 , 6.468348516289841e-05 , -1.831760815509795e-05 , -1.680274842794751e-04 , 8.122604369362710e-06 , 2.112966630126243e-04 , 5.363176092207731e-06 , -1.235778898069599e-04 , -7.709953870959738e-06 , 3.098655427549614e-05 , 2.634638058314591e-06 , -4.584365006125596e-06 , 7.784307399132289e-07 , 2.345452381285535e-06 , -6.188482408032955e-07 , -4.998403651495349e-07 , 8.079312086264899e-05 ],\n[ -9.505329207477657e-05 , 
-4.156338135574478e-06 , 8.403491035549607e-05 , 1.321492117523870e-05 , -3.319317982416059e-05 , -2.227553213444590e-06 , 2.076535734347213e-05 , -5.396753498031014e-06 , -1.122137909789006e-05 , 6.936412133339521e-06 , 7.606268115327406e-06 , -1.187862037244012e-05 , 1.407122212777626e-06 , 1.094711235171940e-05 , -1.200073137869545e-05 , 1.651842998945439e-06 , 5.249705849097757e-05 , -1.103641130738799e-05 , -1.452133704846186e-04 , 5.363176092207760e-06 , 1.841513653060571e-04 , 4.008684964031859e-06 , -1.088327175419565e-04 , -4.436272922923257e-06 , 2.663616882515994e-05 , 4.441129647729434e-07 , -1.823900470977472e-06 , 9.131027910925659e-07 , 3.423181895869568e-07 , -3.248030257457939e-07 , 1.565114731653676e-04 ],\n[ 5.966357079134110e-05 , -1.832004402942522e-04 , -2.162539474930512e-04 , 1.940880629106866e-05 , 6.573717163799288e-05 , -3.139708798836991e-05 , -2.285062874634257e-05 , 3.160900890445919e-05 , -3.231227423594649e-06 , -2.092061019101990e-05 , 1.774935646371122e-05 , 1.702364109771204e-06 , -1.355631776270847e-05 , 8.092095182175919e-06 , 4.735853628377626e-06 , -1.016330205472776e-05 , 4.009767661794407e-06 , 4.275283346882582e-05 , -9.497391478575592e-06 , -1.235778898069599e-04 , 4.008684964031889e-06 , 1.585945240480566e-04 , 4.814276592252276e-06 , -9.505942249560426e-05 , -5.269885642910686e-06 , 2.508762233822088e-05 , 1.002347324957512e-06 , -3.233685256439425e-06 , 3.615248228908033e-07 , 7.731232588721100e-07 , -2.364008973553363e-04 ],\n[ 3.909940118295615e-05 , 2.147464735651595e-04 , 1.579894933576790e-04 , -3.480998018498535e-05 , -3.698152620573602e-05 , 3.307650446358831e-05 , 4.208354769197900e-06 , -2.489184945477703e-05 , 1.210473810098150e-05 , 1.263627438389614e-05 , -2.076809415497966e-05 , 7.400825614557483e-06 , 9.804336837952683e-06 , -1.368592923368950e-05 , 3.823008200476585e-06 , 7.380837404491765e-06 , -9.222615132968445e-06 , 1.839573029824542e-06 , 3.809665940899589e-05 , -7.709953870959746e-06 , -1.088327175419565e-04 , 4.814276592252303e-06 , 1.387884209137800e-04 , 2.113244593212237e-06 , -8.153912579909763e-05 , -4.652337820383065e-06 , 1.937304772679640e-05 , 2.478096542996087e-06 , -8.169606503678209e-07 , -4.287488876009555e-07 , 1.035998031439656e-04 ],\n[ -9.094078397502061e-05 , -6.193272093282151e-05 , 1.853443013113500e-05 , 1.778991053653038e-05 , -1.629294629181825e-05 , -6.558671845197636e-06 , 1.425348474305646e-05 , -2.480536094745301e-06 , -7.926468935313898e-06 , 5.132905197400817e-06 , 3.678275105655839e-06 , -6.767501859886567e-06 , 1.705077595669545e-06 , 4.725521343618848e-06 , -5.632608045337194e-06 , 7.876901704902667e-07 , 4.447935971545785e-06 , -5.092906646915108e-06 , 1.059672833862867e-06 , 3.098655427549616e-05 , -4.436272922923254e-06 , -9.505942249560430e-05 , 2.113244593212259e-06 , 1.241068277448159e-04 , 1.324825159079387e-06 , -7.356715084057034e-05 , -1.785631352650215e-06 , 1.695100826863567e-05 , 5.774682432637083e-07 , -3.303613432465353e-07 , 9.651449332646128e-05 ],\n[ 3.979403197295345e-05 , -1.319710553324410e-04 , -1.453365923716808e-04 , 1.586887875775279e-05 , 4.241341573520792e-05 , -2.123041867524383e-05 , -1.526811061895372e-05 , 2.230938441981634e-05 , -3.432017428898139e-06 , -1.410173385828241e-05 , 1.351664987117440e-05 , -7.456805310851761e-07 , -8.448838581047687e-06 , 6.462723202670970e-06 , 1.401726052082422e-06 , -5.693055610174417e-06 , 2.844605015203572e-06 , 2.996296133918029e-06 , -5.566702444135167e-06 , 2.634638058314581e-06 , 2.663616882515997e-05 , 
-5.269885642910686e-06 , -8.153912579909767e-05 , 1.324825159079404e-06 , 1.082133675166925e-04 , 2.990415878922840e-06 , -6.513246311773947e-05 , -2.759724213714544e-06 , 1.484095638923724e-05 , 7.424809301046746e-07 , -1.617594954504215e-04 ],\n[ 5.883724662691994e-05 , 1.948452573660281e-04 , 1.270119640983281e-04 , -3.017037178432670e-05 , -2.624727597578309e-05 , 2.397646436678337e-05 , 3.047660598081647e-06 , -1.767486060640050e-05 , 8.827938351713212e-06 , 8.068421998378197e-06 , -1.391917758734134e-05 , 5.695968329623178e-06 , 5.271239541237441e-06 , -8.176454311340913e-06 , 2.631914429094653e-06 , 3.898194171094623e-06 , -4.927439995523706e-06 , 5.026786485483527e-07 , 4.241342392780371e-06 , -4.584365006125614e-06 , 4.441129647729196e-07 , 2.508762233822091e-05 , -4.652337820383076e-06 , -7.356715084057034e-05 , 2.990415878922861e-06 , 9.541694080046339e-05 , 5.311088722428387e-07 , -5.655395254747548e-05 , -7.544356044794082e-07 , 1.269980847624510e-05 , 4.696018935268347e-05 ],\n[ -8.868728142024831e-05 , -5.101630490843126e-05 , 1.393651877690296e-05 , 6.647594986721235e-06 , -1.229090821564965e-05 , 9.138618011586676e-07 , 9.299255727538887e-06 , -6.845063675872692e-06 , -2.192391060027468e-06 , 6.590533164499501e-06 , -3.264922954751675e-06 , -2.204234030240666e-06 , 3.753161433794360e-06 , -1.037965911726866e-06 , -1.879900165857787e-06 , 1.890193310260486e-06 , 2.779858179450956e-07 , -1.803524706078243e-06 , 1.125163314158881e-06 , 7.784307399132557e-07 , -1.823900470977467e-06 , 1.002347324957483e-06 , 1.937304772679643e-05 , -1.785631352650217e-06 , -6.513246311773947e-05 , 5.311088722428587e-07 , 7.440208775369848e-05 , 7.311641032314037e-07 , -2.774078047441206e-05 , -4.408828958294675e-07 , 1.075017250578020e-04 ],\n[ 1.649195968391140e-05 , -9.176394513867907e-05 , -8.735349638012086e-05 , 1.545376441325374e-05 , 2.348090332681419e-05 , -1.527849014454438e-05 , -8.183025849839297e-06 , 1.581526117380169e-05 , -4.171466247118380e-06 , -9.628875957888362e-06 , 1.128720431864013e-05 , -2.458146094280058e-06 , -5.679341230392763e-06 , 5.963104944027804e-06 , -6.802392260490372e-07 , -3.494268997347246e-06 , 2.890920446156225e-06 , 7.612853881616096e-07 , -3.300826353062134e-06 , 2.345452381285531e-06 , 9.131027910925789e-07 , -3.233685256439427e-06 , 2.478096542996079e-06 , 1.695100826863569e-05 , -2.759724213714545e-06 , -5.655395254747549e-05 , 7.311641032314153e-07 , 6.559666484932615e-05 , 1.240877065411180e-07 , -2.470688255280269e-05 , -9.189338863514660e-05 ],\n[ 3.986378541236639e-05 , 4.717722996544147e-05 , 1.562163815155139e-05 , -5.578313586592747e-06 , -2.215657597169136e-07 , 2.261408120955417e-06 , -2.016271133614381e-06 , 2.198506926483088e-07 , 1.331053824099042e-06 , -1.186884523575363e-06 , -1.552278484090472e-07 , 1.077364537604021e-06 , -7.297839478992591e-07 , -2.287646204875707e-07 , 6.412891565621495e-07 , -2.097250054628229e-07 , -3.536840533005254e-07 , 3.175194859018434e-07 , 2.381295916739206e-07 , -6.188482408033085e-07 , 3.423181895869513e-07 , 3.615248228908187e-07 , -8.169606503678325e-07 , 5.774682432637071e-07 , 1.484095638923725e-05 , -7.544356044794156e-07 , -2.774078047441205e-05 , 1.240877065411238e-07 , 1.330905767924987e-05 , 8.884104622005010e-08 , -3.158609279173533e-05 ],\n[ -2.080734204109082e-05 , 7.111249931498269e-06 , 1.819382613181743e-05 , -2.498675358118083e-06 , -6.444872622960494e-06 , 3.617283769858598e-06 , 3.118202698102355e-06 , -4.837425950871769e-06 , 8.121122753846729e-07 , 3.379003341109011e-06 , 
-3.464691582178025e-06 , 4.312391512705559e-07 , 1.996414791054076e-06 , -1.804397982061937e-06 , 5.793723170821257e-08 , 1.107934512468949e-06 , -7.989052895188420e-07 , -2.524196216716127e-07 , 8.492464195141338e-07 , -4.998403651495291e-07 , -3.248030257457955e-07 , 7.731232588721048e-07 , -4.287488876009484e-07 , -3.303613432465375e-07 , 7.424809301046709e-07 , 1.269980847624510e-05 , -4.408828958294696e-07 , -2.470688255280269e-05 , 8.884104622005171e-08 , 1.197542910948322e-05 , 3.878501241188344e-05 ],\n[ -5.210020320049051e-02 , -1.890906559693971e-02 , 1.382668594719924e-02 , -7.076421937386331e-03 , 7.322667111787697e-04 , 2.325697711870943e-03 , -1.983005807705755e-03 , -2.819410239254837e-05 , 1.468573793837301e-03 , -1.318935000558654e-03 , 2.259380952893342e-04 , 5.884326361165944e-04 , -5.689656491774901e-04 , 6.675499678278620e-05 , 2.979440856739906e-04 , -2.578053969849344e-04 , -2.873774500945195e-05 , 2.671139718648887e-04 , -2.789569803656384e-04 , 8.079312086266559e-05 , 1.565114731653709e-04 , -2.364008973553556e-04 , 1.035998031439817e-04 , 9.651449332646111e-05 , -1.617594954504337e-04 , 4.696018935269557e-05 , 1.075017250578020e-04 , -9.189338863515410e-05 , -3.158609279173351e-05 , 3.878501241188487e-05 , 2.121632678397157e-01 ]])\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2732,"cells":{"repo_name":{"kind":"string","value":"Asquera/bigcouch"},"path":{"kind":"string","value":"couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/msi.py"},"copies":{"kind":"string","value":"61"},"size":{"kind":"string","value":"20261"},"content":{"kind":"string","value":"\"\"\"SCons.Tool.packaging.msi\n\nThe msi packager.\n\"\"\"\n\n#\n# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation\n# \n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n__revision__ = \"src/engine/SCons/Tool/packaging/msi.py 5134 2010/08/16 23:02:40 bdeegan\"\n\nimport os\nimport SCons\nfrom SCons.Action import Action\nfrom SCons.Builder import Builder\n\nfrom xml.dom.minidom import *\nfrom xml.sax.saxutils import escape\n\nfrom SCons.Tool.packaging import stripinstallbuilder\n\n#\n# Utility functions\n#\ndef convert_to_id(s, id_set):\n \"\"\" Some parts of .wxs need an Id attribute (for example: The File and\n Directory directives. The charset is limited to A-Z, a-z, digits,\n underscores, periods. Each Id must begin with a letter or with a\n underscore. 
Google for \"CNDL0015\" for information about this.\n\n Requirements:\n * the string created must only contain chars from the target charset.\n * the string created must have a minimal editing distance from the\n original string.\n * the string created must be unique for the whole .wxs file.\n\n Observation:\n * There are 64 chars in the charset.\n\n Idea:\n * filter out forbidden characters. Check for a collision with the help\n of the id_set. Add the number of the collision at the\n end of the created string. Furthermore, ensure a correct start of\n the string.\n \"\"\"\n charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'\n if s[0] in '0123456789.':\n s = '_'+s\n id = ''.join(c for c in s if c in charset)\n\n # did we already generate an id for this file?\n try:\n return id_set[id][s]\n except KeyError:\n # no we did not so initialize with the id\n if id not in id_set: id_set[id] = { s : id }\n # there is a collision, generate an id which is unique by appending\n # the collision number\n else: id_set[id][s] = id + str(len(id_set[id]))\n\n return id_set[id][s]\n\ndef is_dos_short_file_name(file):\n \"\"\" examine if the given file is in the 8.3 form.\n \"\"\"\n fname, ext = os.path.splitext(file)\n proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot\n proper_fname = file.isupper() and len(fname) <= 8\n\n return proper_ext and proper_fname\n\ndef gen_dos_short_file_name(file, filename_set):\n \"\"\" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982\n\n These are not complete 8.3 DOS short names. The ~ char is missing and\n replaced with one character from the filename. WiX warns about such\n filenames, since a collision might occur. Google for \"CNDL1014\" for\n more information.\n \"\"\"\n # guard this to not confuse the generation\n if is_dos_short_file_name(file):\n return file\n\n fname, ext = os.path.splitext(file) # ext contains the dot\n\n # first try if it suffices to convert to upper\n file = file.upper()\n if is_dos_short_file_name(file):\n return file\n\n # strip forbidden characters.\n forbidden = '.\"/[]:;=, '\n fname = ''.join(c for c in fname if c not in forbidden)\n\n # check if we already generated a filename with the same number:\n # thisis1.txt, thisis2.txt etc.\n duplicate, num = not None, 1\n while duplicate:\n shortname = \"%s%s\" % (fname[:8-len(str(num))].upper(),\\\n str(num))\n if len(ext) >= 2:\n shortname = \"%s%s\" % (shortname, ext[:4].upper())\n\n duplicate, num = shortname in filename_set, num+1\n\n assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)\n filename_set.append(shortname)\n return shortname\n\ndef create_feature_dict(files):\n \"\"\" X_MSI_FEATURE and doc FileTags can be used to collect files in a\n hierarchy. This function collects the files into this hierarchy.\n \"\"\"\n dict = {}\n\n def add_to_dict( feature, file ):\n if not SCons.Util.is_List( feature ):\n feature = [ feature ]\n\n for f in feature:\n if f not in dict:\n dict[ f ] = [ file ]\n else:\n dict[ f ].append( file )\n\n for file in files:\n if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):\n add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)\n elif hasattr( file, 'PACKAGING_DOC' ):\n add_to_dict( 'PACKAGING_DOC', file )\n else:\n add_to_dict( 'default', file )\n\n return dict\n\ndef generate_guids(root):\n \"\"\" generates globally unique identifiers for parts of the xml which need\n them.\n\n Component tags have a special requirement. 
Their UUID is only allowed to\n change if the list of their contained resources has changed. This allows\n for clean removal and proper updates.\n\n To handle this requirement, the uuid is generated with an md5 hashing the\n whole subtree of an xml node.\n \"\"\"\n from hashlib import md5\n\n # specify which tags need a guid and in which attribute this should be stored.\n needs_id = { 'Product' : 'Id',\n 'Package' : 'Id',\n 'Component' : 'Guid',\n }\n\n # find all XML nodes matching the key, retrieve their attribute, hash their\n # subtree, convert hash to string and add as an attribute to the xml node.\n for (key,value) in needs_id.items():\n node_list = root.getElementsByTagName(key)\n attribute = value\n for node in node_list:\n hash = md5(node.toxml()).hexdigest()\n hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )\n node.attributes[attribute] = hash_str\n\n\n\ndef string_wxsfile(target, source, env):\n return \"building WiX file %s\"%( target[0].path )\n\ndef build_wxsfile(target, source, env):\n \"\"\" compiles a .wxs file from the keywords given in env['msi_spec'] and\n by analyzing the tree of source nodes and their tags.\n \"\"\"\n file = open(target[0].abspath, 'w')\n\n try:\n # Create a document with the Wix root tag\n doc = Document()\n root = doc.createElement( 'Wix' )\n root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'\n doc.appendChild( root )\n\n filename_set = [] # this is to circumvent duplicates in the shortnames\n id_set = {} # this is to circumvent duplicates in the ids\n\n # Create the content\n build_wxsfile_header_section(root, env)\n build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)\n generate_guids(root)\n build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)\n build_wxsfile_default_gui(root)\n build_license_file(target[0].get_dir(), env)\n\n # write the xml to a file\n file.write( doc.toprettyxml() )\n\n # call a user specified function\n if 'CHANGE_SPECFILE' in env:\n env['CHANGE_SPECFILE'](target, source)\n\n except KeyError, e:\n raise SCons.Errors.UserError( '\"%s\" package field for MSI is missing.' % e.args[0] )\n\n#\n# setup function\n#\ndef create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):\n \"\"\" Create the wix default target directory layout and return the innermost\n directory.\n\n We assume that the XML tree delivered in the root argument already contains\n the Product tag.\n\n Everything is put under the PFiles directory property defined by WiX.\n After that a directory with the 'VENDOR' tag is placed and then a\n directory with the name of the project and its VERSION. 
This leads to the\n following TARGET Directory Layout:\n C:\<PFiles>\<Vendor>\<Projectname-Version>\\\n Example: C:\Programme\Company\Product-1.2\\\n \"\"\"\n doc = Document()\n d1 = doc.createElement( 'Directory' )\n d1.attributes['Id'] = 'TARGETDIR'\n d1.attributes['Name'] = 'SourceDir'\n\n d2 = doc.createElement( 'Directory' )\n d2.attributes['Id'] = 'ProgramFilesFolder'\n d2.attributes['Name'] = 'PFiles'\n\n d3 = doc.createElement( 'Directory' )\n d3.attributes['Id'] = 'VENDOR_folder'\n d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )\n d3.attributes['LongName'] = escape( VENDOR )\n\n d4 = doc.createElement( 'Directory' )\n project_folder = \"%s-%s\" % ( NAME, VERSION )\n d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'\n d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )\n d4.attributes['LongName'] = escape( project_folder )\n\n d1.childNodes.append( d2 )\n d2.childNodes.append( d3 )\n d3.childNodes.append( d4 )\n\n root.getElementsByTagName('Product')[0].childNodes.append( d1 )\n\n return d4\n\n#\n# mandatory and optional file tags\n#\ndef build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):\n \"\"\" builds the Component sections of the wxs file with their included files.\n\n Files need to be specified in 8.3 format and in the long name format; long\n filenames will be converted automatically.\n\n Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.\n \"\"\"\n root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )\n components = create_feature_dict( files )\n factory = Document()\n\n def get_directory( node, dir ):\n \"\"\" returns the node under the given node representing the directory.\n\n Returns the component node if dir is None or empty.\n \"\"\"\n if dir == '' or not dir:\n return node\n\n Directory = node\n dir_parts = dir.split(os.path.sep)\n\n # to make sure that our directory ids are unique, the parent folders are\n # consecutively added to upper_dir\n upper_dir = ''\n\n # walk down the xml tree finding parts of the directory\n dir_parts = [d for d in dir_parts if d != '']\n for d in dir_parts[:]:\n already_created = [c for c in Directory.childNodes\n if c.nodeName == 'Directory'\n and c.attributes['LongName'].value == escape(d)] \n\n if already_created != []:\n Directory = already_created[0]\n dir_parts.remove(d)\n upper_dir += d\n else:\n break\n\n for d in dir_parts:\n nDirectory = factory.createElement( 'Directory' )\n nDirectory.attributes['LongName'] = escape( d )\n nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )\n upper_dir += d\n nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )\n\n Directory.childNodes.append( nDirectory )\n Directory = nDirectory\n\n return Directory\n\n for file in files:\n drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )\n filename = os.path.basename( path )\n dirname = os.path.dirname( path )\n\n h = {\n # tagname : default value\n 'PACKAGING_X_MSI_VITAL' : 'yes',\n 'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),\n 'PACKAGING_X_MSI_LONGNAME' : filename,\n 'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),\n 'PACKAGING_X_MSI_SOURCE' : file.get_path(),\n }\n\n # fill in the default tags given above.\n for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:\n setattr( file, k, v )\n\n File = factory.createElement( 'File' )\n File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )\n 
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )\n File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )\n File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )\n File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )\n\n # create the <Component> Tag under which this file should appear\n Component = factory.createElement('Component')\n Component.attributes['DiskId'] = '1'\n Component.attributes['Id'] = convert_to_id( filename, id_set )\n\n # hang the component node under the root node and the file node\n # under the component node.\n Directory = get_directory( root, dirname )\n Directory.childNodes.append( Component )\n Component.childNodes.append( File )\n\n#\n# additional functions\n#\ndef build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):\n \"\"\" This function creates the <Feature> tag based on the supplied xml tree.\n\n This is achieved by finding all <Component>s and adding them to a default target.\n\n It should be called after the tree has been built completely. We assume\n that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.\n\n Furthermore a top-level <Feature> with the name and VERSION of the software will be created.\n\n A PACKAGING_X_MSI_FEATURE can either be a string, where the feature\n DESCRIPTION will be the same as its title, or a Tuple, where the first\n part will be its title and the second its DESCRIPTION.\n \"\"\"\n factory = Document()\n Feature = factory.createElement('Feature')\n Feature.attributes['Id'] = 'complete'\n Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'\n Feature.attributes['Level'] = '1'\n Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )\n Feature.attributes['Description'] = escape( SUMMARY )\n Feature.attributes['Display'] = 'expand'\n\n for (feature, files) in create_feature_dict(files).items():\n SubFeature = factory.createElement('Feature')\n SubFeature.attributes['Level'] = '1'\n\n if SCons.Util.is_Tuple(feature):\n SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )\n SubFeature.attributes['Title'] = escape(feature[0])\n SubFeature.attributes['Description'] = escape(feature[1])\n else:\n SubFeature.attributes['Id'] = convert_to_id( feature, id_set )\n if feature=='default':\n SubFeature.attributes['Description'] = 'Main Part'\n SubFeature.attributes['Title'] = 'Main Part'\n elif feature=='PACKAGING_DOC':\n SubFeature.attributes['Description'] = 'Documentation'\n SubFeature.attributes['Title'] = 'Documentation'\n else:\n SubFeature.attributes['Description'] = escape(feature)\n SubFeature.attributes['Title'] = escape(feature)\n\n # build the componentrefs. 
Since one of the design decisions is that every\n # file is also a component, we walk the list of files and create a\n # reference.\n for f in files:\n ComponentRef = factory.createElement('ComponentRef')\n ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )\n SubFeature.childNodes.append(ComponentRef)\n\n Feature.childNodes.append(SubFeature)\n\n root.getElementsByTagName('Product')[0].childNodes.append(Feature)\n\ndef build_wxsfile_default_gui(root):\n \"\"\" This function adds a default GUI to the wxs file\n \"\"\"\n factory = Document()\n Product = root.getElementsByTagName('Product')[0]\n\n UIRef = factory.createElement('UIRef')\n UIRef.attributes['Id'] = 'WixUI_Mondo'\n Product.childNodes.append(UIRef)\n\n UIRef = factory.createElement('UIRef')\n UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'\n Product.childNodes.append(UIRef)\n\ndef build_license_file(directory, spec):\n \"\"\" creates a License.rtf file with the content of \"X_MSI_LICENSE_TEXT\"\n in the given directory\n \"\"\"\n name, text = '', ''\n\n try:\n name = spec['LICENSE']\n text = spec['X_MSI_LICENSE_TEXT']\n except KeyError:\n pass # ignore this as X_MSI_LICENSE_TEXT is optional\n\n if name!='' or text!='':\n file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )\n file.write('{\\\\rtf')\n if text!='':\n file.write(text.replace('\\n', '\\\\par '))\n else:\n file.write(name+'\\\\par\\\\par')\n file.write('}')\n file.close()\n\n#\n# mandatory and optional package tags\n#\ndef build_wxsfile_header_section(root, spec):\n \"\"\" Adds the xml file node which defines the package meta-data.\n \"\"\"\n # Create the needed DOM nodes and add them at the correct position in the tree.\n factory = Document()\n Product = factory.createElement( 'Product' )\n Package = factory.createElement( 'Package' )\n\n root.childNodes.append( Product )\n Product.childNodes.append( Package )\n\n # set \"mandatory\" default values\n if 'X_MSI_LANGUAGE' not in spec:\n spec['X_MSI_LANGUAGE'] = '1033' # select English\n\n # mandatory sections, will throw a KeyError if the tag is not available\n Product.attributes['Name'] = escape( spec['NAME'] )\n Product.attributes['Version'] = escape( spec['VERSION'] )\n Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )\n Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )\n Package.attributes['Description'] = escape( spec['SUMMARY'] )\n\n # now the optional tags, for which we avoid the KeyError exception\n if 'DESCRIPTION' in spec:\n Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )\n\n if 'X_MSI_UPGRADE_CODE' in spec:\n Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )\n\n # We hardcode the media tag as our current model cannot handle it.\n Media = factory.createElement('Media')\n Media.attributes['Id'] = '1'\n Media.attributes['Cabinet'] = 'default.cab'\n Media.attributes['EmbedCab'] = 'yes'\n root.getElementsByTagName('Product')[0].childNodes.append(Media)\n\n# this builder is the entry-point for .wxs file compiler.\nwxs_builder = Builder(\n action = Action( build_wxsfile, string_wxsfile ),\n ensure_suffix = '.wxs' )\n\ndef package(env, target, source, PACKAGEROOT, NAME, VERSION,\n DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):\n # make sure that the Wix Builder is in the environment\n SCons.Tool.Tool('wix').generate(env)\n\n # put the keywords for the specfile compiler. 
\ndef package(env, target, source, PACKAGEROOT, NAME, VERSION,\n DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):\n # make sure that the Wix Builder is in the environment\n SCons.Tool.Tool('wix').generate(env)\n\n # gather the keywords for the specfile compiler. These are the arguments\n # given to the package function and all optional ones stored in kw, minus\n # the source, target and env ones.\n loc = locals()\n del loc['kw']\n kw.update(loc)\n del kw['source'], kw['target'], kw['env']\n\n # strip the install builder from the source files\n target, source = stripinstallbuilder(target, source, env)\n\n # put the arguments into the env and call the specfile builder.\n env['msi_spec'] = kw\n specfile = wxs_builder(* [env, target, source], **kw)\n\n # now call the WiX Tool with the built specfile added as a source.\n msifile = env.WiX(target, specfile)\n\n # return the target and source tuple.\n return (msifile, source+[specfile])\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2733,"cells":{"repo_name":{"kind":"string","value":"johnkit/vtk-dev"},"path":{"kind":"string","value":"Filters/Hybrid/Testing/Python/TestGridWarp3D.py"},"copies":{"kind":"string","value":"20"},"size":{"kind":"string","value":"2154"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Image pipeline\nreader = vtk.vtkImageReader()\nreader.ReleaseDataFlagOff()\nreader.SetDataByteOrderToLittleEndian()\nreader.SetDataExtent(0,63,0,63,1,93)\nreader.SetDataSpacing(3.2,3.2,1.5)\nreader.SetDataOrigin(-100.8,-100.8,-69)\nreader.SetFilePrefix(\"\" + str(VTK_DATA_ROOT) + \"/Data/headsq/quarter\")\nreader.SetDataMask(0x7fff)\nreader.Update()\np1 = vtk.vtkPoints()\np2 = vtk.vtkPoints()\np1.InsertNextPoint(0,0,0)\np2.InsertNextPoint(-60,10,20)\np1.InsertNextPoint(-100,-100,-50)\np2.InsertNextPoint(-100,-100,-50)\np1.InsertNextPoint(-100,-100,50)\np2.InsertNextPoint(-100,-100,50)\np1.InsertNextPoint(-100,100,-50)\np2.InsertNextPoint(-100,100,-50)\np1.InsertNextPoint(-100,100,50)\np2.InsertNextPoint(-100,100,50)\np1.InsertNextPoint(100,-100,-50)\np2.InsertNextPoint(100,-100,-50)\np1.InsertNextPoint(100,-100,50)\np2.InsertNextPoint(100,-100,50)\np1.InsertNextPoint(100,100,-50)\np2.InsertNextPoint(100,100,-50)\np1.InsertNextPoint(100,100,50)\np2.InsertNextPoint(100,100,50)\ntransform = vtk.vtkThinPlateSplineTransform()\ntransform.SetSourceLandmarks(p1)\ntransform.SetTargetLandmarks(p2)\ntransform.SetBasisToR()\ngridThinPlate = vtk.vtkTransformToGrid()\ngridThinPlate.SetInput(transform)\ngridThinPlate.SetGridExtent(0,64,0,64,0,50)\ngridThinPlate.SetGridSpacing(3.2,3.2,3.0)\ngridThinPlate.SetGridOrigin(-102.4,-102.4,-75)\ngridThinPlate.SetGridScalarTypeToUnsignedChar()\ngridThinPlate.Update()\ngridTransform = vtk.vtkGridTransform()\ngridTransform.SetDisplacementGridData(gridThinPlate.GetOutput())\ngridTransform.SetDisplacementShift(gridThinPlate.GetDisplacementShift())\ngridTransform.SetDisplacementScale(gridThinPlate.GetDisplacementScale())\nreslice = vtk.vtkImageReslice()\nreslice.SetInputConnection(reader.GetOutputPort())\nreslice.SetResliceTransform(gridTransform)\nreslice.SetInterpolationModeToLinear()\nreslice.SetOutputSpacing(1,1,1)\nviewer = vtk.vtkImageViewer()\nviewer.SetInputConnection(reslice.GetOutputPort())\nviewer.SetZSlice(70)\nviewer.SetColorWindow(2000)\nviewer.SetColorLevel(1000)\nviewer.Render()\n# --- end of script
--\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2734,"cells":{"repo_name":{"kind":"string","value":"cossacklabs/acra"},"path":{"kind":"string","value":"wrappers/python/acrawriter/django/__init__.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3334"},"content":{"kind":"string","value":"# Copyright 2016, Cossack Labs Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# coding: utf-8\nfrom django.core import validators\nfrom django.db import models\nfrom django import forms\nfrom django.utils import six\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nimport acrawriter\n\n__all__ = ('CharField', 'EmailField', 'TextField')\n\n\nclass CharField(models.CharField):\n def __init__(self, public_key=None, encoding='utf-8',\n encoding_errors='ignore', *args, **kwargs):\n super(CharField, self).__init__(*args, **kwargs)\n self._encoding = encoding\n self._encoding_errors = encoding_errors\n if not (public_key or settings.ACRA_SERVER_PUBLIC_KEY):\n raise ValueError(\"Set public key arg or settings.ACRA_SERVER_PUBLIC_KEY\")\n self._public_key = public_key or settings.ACRA_SERVER_PUBLIC_KEY\n\n def from_db_value(self, value, *args, **kwargs):\n if isinstance(value, memoryview):\n value = value.tobytes()\n if isinstance(value, six.binary_type):\n return value.decode(self._encoding, errors=self._encoding_errors)\n else:\n return value\n\n def get_db_prep_value(self, value, connection, prepared=False):\n value = super(CharField, self).get_db_prep_value(\n value, connection, prepared)\n if value == '':\n return b''\n elif value is None:\n return None\n else:\n return acrawriter.create_acrastruct(value.encode(self._encoding), self._public_key)\n\n def get_internal_type(self):\n return 'BinaryField'\n\n def to_python(self, value):\n value = super(CharField, self).to_python(value)\n if isinstance(value, six.binary_type):\n return value.decode(self._encoding, errors=self._encoding_errors)\n else:\n return value\n\n\nclass EmailField(CharField):\n default_validators = [validators.validate_email]\n description = _(\"Email address\")\n\n def __init__(self, *args, **kwargs):\n kwargs['max_length'] = kwargs.get('max_length', 254)\n super(EmailField, self).__init__(*args, **kwargs)\n\n\nclass TextField(CharField):\n description = _(\"Text\")\n\n def __init__(self, *args, **kwargs):\n super(TextField, self).__init__(*args, **kwargs)\n self.validators = []\n\n def formfield(self, **kwargs):\n # Passing max_length to forms.CharField means that the value's length\n # will be validated twice. 
This is considered acceptable since we want\n # the value in the form field (to pass into widget for example).\n defaults = {'max_length': self.max_length, 'widget': forms.Textarea}\n defaults.update(kwargs)\n return super(TextField, self).formfield(**defaults)\n\n def check(self, **kwargs):\n return []\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2735,"cells":{"repo_name":{"kind":"string","value":"ds-hwang/chromium-crosswalk"},"path":{"kind":"string","value":"tools/android/loading/resource_sack_display.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"4182"},"content":{"kind":"string","value":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Utilities for displaying a ResourceSack.\n\nWhen run standalone, takes traces on the command line and produces a dot file to\nstdout.\n\"\"\"\n\n\ndef ToDot(sack, output, prune=-1, long_edge_msec=2000):\n \"\"\"Output as a dot file.\n\n Args:\n sack: (ResourceSack) the sack to convert to dot.\n output: a file-like output stream.\n prune: if positive, prune & coalesce nodes under the specified threshold\n of repeated views, as fraction node views / total graphs. All pruned\n nodes are represented by a single node, and an edge is connected only if\n the view count is greater than 1.\n long_edge_msec: if positive, the definition of a long edge. Long edges are\n distinguished in graph.\n \"\"\"\n output.write(\"\"\"digraph dependencies {\n rankdir = LR;\n \"\"\")\n\n pruned = set()\n num_graphs = len(sack.graph_info)\n for bag in sack.bags:\n if prune > 0 and float(len(bag.graphs)) / num_graphs < prune:\n pruned.add(bag)\n continue\n output.write('%d [label=\"%s (%d)\\n(%d, %d)\\n(%.2f, %.2f)\" shape=%s; '\n 'style=filled; fillcolor=%s];\\n' % (\n bag.Index(), bag.label, len(bag.graphs),\n min(bag.total_costs), max(bag.total_costs),\n min(bag.relative_costs), max(bag.relative_costs),\n _CriticalToShape(bag),\n _AmountToNodeColor(len(bag.graphs), num_graphs)))\n\n if pruned:\n pruned_index = num_graphs\n output.write('%d [label=\"Pruned at %.0f%%\\n(%d)\"; '\n 'shape=polygon; style=dotted];\\n' %\n (pruned_index, 100 * prune, len(pruned)))\n\n for bag in sack.bags:\n if bag in pruned:\n for succ in bag.Successors():\n if succ not in pruned:\n output.write('%d -> %d [style=dashed];\\n' % (\n pruned_index, succ.Index()))\n for succ in bag.Successors():\n if succ in pruned:\n if len(bag.successor_sources[succ]) > 1:\n output.write('%d -> %d [label=\"%d\"; style=dashed];\\n' % (\n bag.Index(), pruned_index, len(bag.successor_sources[succ])))\n else:\n num_succ = len(bag.successor_sources[succ])\n num_long = 0\n for graph, source, target in bag.successor_sources[succ]:\n if graph.EdgeCost(source, target) > long_edge_msec:\n num_long += 1\n if num_long > 0:\n long_frac = float(num_long) / num_succ\n long_edge_style = '; penwidth=%f' % (2 + 6.0 * long_frac)\n if long_frac < 0.75:\n long_edge_style += '; style=dashed'\n else:\n long_edge_style = ''\n min_edge = min(bag.successor_edge_costs[succ])\n max_edge = max(bag.successor_edge_costs[succ])\n output.write('%d -> %d [label=\"%d\\n(%f,%f)\"; color=%s %s];\\n' % (\n bag.Index(), succ.Index(), num_succ, min_edge, max_edge,\n _AmountToEdgeColor(num_succ, len(bag.graphs)),\n long_edge_style))\n\n output.write('}')\n\n\ndef _CriticalToShape(bag):\n frac = float(bag.num_critical) / bag.num_nodes\n if frac < 0.4:\n return 'oval'\n elif frac < 0.7:\n 
return 'polygon'\n elif frac < 0.9:\n return 'trapezium'\n return 'box'\n\n\ndef _AmountToNodeColor(numer, denom):\n if denom <= 0:\n return 'grey72'\n ratio = 1.0 * numer / denom\n if ratio < .3:\n return 'white'\n elif ratio < .6:\n return 'yellow'\n elif ratio < .8:\n return 'orange'\n return 'green'\n\n\ndef _AmountToEdgeColor(numer, denom):\n color = _AmountToNodeColor(numer, denom)\n if color == 'white' or color == 'grey72':\n return 'black'\n return color\n\n\ndef _Main():\n import json\n import logging\n import sys\n\n import loading_model\n import loading_trace\n import resource_sack\n\n sack = resource_sack.GraphSack()\n for fname in sys.argv[1:]:\n trace = loading_trace.LoadingTrace.FromJsonDict(\n json.load(open(fname)))\n logging.info('Making graph from %s', fname)\n model = loading_model.ResourceGraph(trace, content_lens=None)\n sack.ConsumeGraph(model)\n logging.info('Finished %s', fname)\n ToDot(sack, sys.stdout, prune=.1)\n\n\nif __name__ == '__main__':\n _Main()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2736,"cells":{"repo_name":{"kind":"string","value":"overra/node-gyp"},"path":{"kind":"string","value":"gyp/pylib/gyp/MSVSToolFile.py"},"copies":{"kind":"string","value":"2736"},"size":{"kind":"string","value":"1804"},"content":{"kind":"string","value":"# Copyright (c) 2012 Google Inc. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Visual Studio project reader/writer.\"\"\"\n\nimport gyp.common\nimport gyp.easy_xml as easy_xml\n\n\nclass Writer(object):\n \"\"\"Visual Studio XML tool file writer.\"\"\"\n\n def __init__(self, tool_file_path, name):\n \"\"\"Initializes the tool file.\n\n Args:\n tool_file_path: Path to the tool file.\n name: Name of the tool file.\n \"\"\"\n self.tool_file_path = tool_file_path\n self.name = name\n self.rules_section = ['Rules']\n\n def AddCustomBuildRule(self, name, cmd, description,\n additional_dependencies,\n outputs, extensions):\n \"\"\"Adds a rule to the tool file.\n\n Args:\n name: Name of the rule.\n description: Description of the rule.\n cmd: Command line of the rule.\n additional_dependencies: other files which may trigger the rule.\n outputs: outputs of the rule.\n extensions: extensions handled by the rule.\n \"\"\"\n rule = ['CustomBuildRule',\n {'Name': name,\n 'ExecutionDescription': description,\n 'CommandLine': cmd,\n 'Outputs': ';'.join(outputs),\n 'FileExtensions': ';'.join(extensions),\n 'AdditionalDependencies':\n ';'.join(additional_dependencies)\n }]\n self.rules_section.append(rule)\n\n def WriteIfChanged(self):\n \"\"\"Writes the tool file.\"\"\"\n content = ['VisualStudioToolFile',\n {'Version': '8.00',\n 'Name': self.name\n },\n self.rules_section\n ]\n easy_xml.WriteXmlIfChanged(content, self.tool_file_path,\n encoding=\"Windows-1252\")\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2737,"cells":{"repo_name":{"kind":"string","value":"yugangw-msft/azure-cli"},"path":{"kind":"string","value":"src/azure-cli/azure/cli/command_modules/vm/manual/custom.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1298"},"content":{"kind":"string","value":"# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n# pylint: disable=too-many-lines\n\n\ndef sshkey_create(client,\n resource_group_name,\n ssh_public_key_name,\n location,\n tags=None,\n public_key=None):\n parameters = {\n 'location': location,\n 'tags': tags,\n 'public_key': public_key\n }\n client.create(resource_group_name=resource_group_name,\n ssh_public_key_name=ssh_public_key_name,\n parameters=parameters)\n if public_key is None: # Generate one if public key is None\n client.generate_key_pair(resource_group_name=resource_group_name,\n ssh_public_key_name=ssh_public_key_name)\n return client.get(resource_group_name=resource_group_name,\n ssh_public_key_name=ssh_public_key_name)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2738,"cells":{"repo_name":{"kind":"string","value":"neiudemo1/django"},"path":{"kind":"string","value":"docs/conf.py"},"copies":{"kind":"string","value":"54"},"size":{"kind":"string","value":"11938"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Django documentation build configuration file, created by\n# sphinx-quickstart on Thu Mar 27 09:06:53 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom __future__ import unicode_literals\n\nimport sys\nfrom os.path import abspath, dirname, join\n\n# Make sure we get the version of this copy of Django\nsys.path.insert(1, dirname(dirname(abspath(__file__))))\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.append(abspath(join(dirname(__file__), \"_ext\")))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"djangodocs\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"ticket_role\",\n]\n\n# Spelling check needs an additional module that is not installed by default.\n# Add it only if spelling check is requested so docs can be generated without it.\nif 'spelling' in sys.argv:\n extensions.append(\"sphinxcontrib.spelling\")\n\n# Spelling language.\nspelling_lang = 'en_US'\n\n# Location of word list.\nspelling_word_list_filename = 'spelling_wordlist'\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = []\n\n# The suffix of source filenames.\nsource_suffix = '.txt'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'contents'\n\n# General substitutions.\nproject = 'Django'\ncopyright = 'Django Software Foundation and contributors'\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.10'\n# The full version, including alpha/beta/rc tags.\ntry:\n from django import VERSION, get_version\nexcept ImportError:\n release = version\nelse:\n def django_release():\n pep386ver = get_version()\n if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep386ver:\n return pep386ver + '.dev'\n return pep386ver\n\n release = django_release()\n\n# The \"development version\" of Django\ndjango_next_version = '1.10'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# Location for .po/.mo translation files used when language is set\nlocale_dirs = ['locale/']\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\ntoday_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'trac'\n\n# Links to Python's docs should reference the most recent version of the 3.x\n# branch, which is located at this URL.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'sphinx': ('http://sphinx-doc.org/', None),\n 'six': ('http://pythonhosted.org/six/', None),\n 'formtools': ('http://django-formtools.readthedocs.org/en/latest/', None),\n 'psycopg2': ('http://initd.org/psycopg/docs/', None),\n}\n\n# Python's docs don't change every week.\nintersphinx_cache_limit = 90 # days\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"djangodocs\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [\"_theme\"]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# HTML translator class for the builder\nhtml_translator_class = \"djangodocs.DjangoHTMLTranslator\"\n\n# Content template for the index page.\n# html_index = ''\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\nhtml_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Djangodoc'\n\nmodindex_common_prefix = [\"django.\"]\n\n# Appended to every page\nrst_epilog = \"\"\"\n.. |django-users| replace:: :ref:`django-users `\n.. |django-core-mentorship| replace:: :ref:`django-core-mentorship `\n.. |django-developers| replace:: :ref:`django-developers `\n.. |django-announce| replace:: :ref:`django-announce `\n.. |django-updates| replace:: :ref:`django-updates `\n\"\"\"\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n 'preamble': ('\\\\DeclareUnicodeCharacter{2264}{\\\\ensuremath{\\\\le}}'\n '\\\\DeclareUnicodeCharacter{2265}{\\\\ensuremath{\\\\ge}}')\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\n# latex_documents = []\nlatex_documents = [\n ('contents', 'django.tex', 'Django Documentation',\n 'Django Software Foundation', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\n 'ref/django-admin',\n 'django-admin',\n 'Utility script for the Django Web framework',\n ['Django Software Foundation'],\n 1\n), ]\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# List of tuples (startdocname, targetname, title, author, dir_entry,\n# description, category, toctree_only)\ntexinfo_documents = [(\n master_doc, \"django\", \"\", \"\", \"Django\",\n \"Documentation of the Django framework\", \"Web development\", False\n)]\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = 'Django Software Foundation'\nepub_publisher = 'Django Software Foundation'\nepub_copyright = copyright\n\n# The basename for the epub file. It defaults to the project name.\n# epub_basename = 'Django'\n\n# The HTML theme for the epub output. Since the default themes are not optimized\n# for small screen space, using the same theme for HTML and epub output is\n# usually not wise. This defaults to 'epub', a theme designed to save visual\n# space.\nepub_theme = 'djangodocs-epub'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n# epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n# epub_scheme = ''\n\n# The unique identifier of the text. 
This can be an ISBN number\n# or the project homepage.\n# epub_identifier = ''\n\n# A unique identification for the text.\n# epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\nepub_cover = ('', 'epub-cover.html')\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n# epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n# epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n# epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n# epub_tocdup = True\n\n# Choose between 'default' and 'includehidden'.\n# epub_tocscope = 'default'\n\n# Fix unsupported image types using the PIL.\n# epub_fix_images = False\n\n# Scale large images.\n# epub_max_image_width = 0\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# epub_show_urls = 'inline'\n\n# If false, no index is generated.\n# epub_use_index = True\n\n# -- ticket options ------------------------------------------------------------\nticket_url = 'https://code.djangoproject.com/ticket/%s'\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2739,"cells":{"repo_name":{"kind":"string","value":"oskopek/devassistant"},"path":{"kind":"string","value":"test/test_actions.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9352"},"content":{"kind":"string","value":"import os\nimport subprocess\n\nimport pytest\nfrom flexmock import flexmock\n\nfrom devassistant import actions, exceptions\nfrom devassistant.dapi import dapicli\n\nfrom test.logger import LoggingHandler\n\nclass TestActions(object):\n\n def setup_class(self):\n self.ha = actions.HelpAction\n\n def test_get_help_contains_task_keywords(self):\n gh = self.ha().get_help()\n assert 'crt' in gh\n assert 'twk' in gh\n assert 'prep' in gh\n assert 'extra' in gh\n\n def test_get_help_contains_action_name(self):\n a = actions.Action()\n a.name = 'foobar_action_name'\n a.description = 'foobar_action_description'\n actions.register_action(a)\n\n assert 'foobar_action_name' in self.ha().get_help()\n assert 'foobar_action_description' in self.ha().get_help()\n\n def test_format_text_returns_original_text_on_bogus_formatting(self):\n assert self.ha().format_text('aaa', 'foo', 'bar') == 'aaa'\n assert self.ha().format_text('', 'foo', 'bar') == ''\n\n def test_format_text_returns_bold(self):\n assert self.ha().format_text('aaa', 'bold', 'ascii') == '\\033[1maaa\\033[0m'\n\n def test_version_action(self, capsys):\n va = actions.VersionAction()\n from devassistant import __version__ as VERSION\n va.run()\n assert VERSION in capsys.readouterr()[0]\n\n\nclass TestDocAction(object):\n\n def setup_method(self, method):\n self.da = actions.DocAction\n self.tlh = LoggingHandler.create_fresh_handler()\n\n def test_no_docs(self):\n self.da(dap='f').run()\n assert ('INFO', 'DAP f has no documentation.') in self.tlh.msgs\n\n def test_lists_docs(self):\n self.da(dap='c').run()\n assert self.tlh.msgs == [\n ('INFO', 'DAP c has these docs:'),\n ('INFO', 'LICENSE'),\n ('INFO', 'doc1'),\n ('INFO', 'something/foo/doc2'),\n ('INFO', 'Use \"da doc c \" to see a specific document')\n ]\n\n def test_displays_docs(self):\n # we 
\n def test_displays_docs(self):\n # we only test displaying without \"less\" - e.g. simple logging\n flexmock(subprocess).should_receive('check_call').\\\n and_raise(subprocess.CalledProcessError, None, None)\n self.da(dap='c', doc='doc1').run()\n assert ('INFO', 'Bar!\\n') in self.tlh.msgs\n\n\nclass TestPkgSearchAction(object):\n\n @pytest.mark.parametrize('exc', [exceptions.DapiCommError, exceptions.DapiLocalError])\n def test_raising_exceptions(self, exc):\n flexmock(dapicli).should_receive('print_search').and_raise(exc)\n\n with pytest.raises(exceptions.ExecutionException):\n actions.PkgSearchAction(query='foo', noassistants=False, unstable=False,\n deactivated=False, minrank=0, mincount=0,\n allplatforms=False).run()\n\nclass TestPkgInstallAction(object):\n\n def setup_class(self):\n self.pkg = 'foo'\n self.exc_string = 'bar'\n\n @pytest.mark.parametrize(('isfile', 'method'), [\n (True, 'install_dap_from_path'),\n (False, 'install_dap')\n ])\n def test_pkg_install(self, isfile, method):\n flexmock(os.path).should_receive('isfile').with_args(self.pkg)\\\n .and_return(isfile).at_least().once()\n flexmock(dapicli).should_receive(method)\\\n .and_return([self.pkg]).at_least().once()\n\n # Install from path, everything goes well\n actions.PkgInstallAction(package=[self.pkg], force=False,\n reinstall=False, nodeps=False).run()\n\n def test_pkg_install_fails(self):\n flexmock(os.path).should_receive('isfile').with_args(self.pkg)\\\n .and_return(True).at_least().once()\n flexmock(dapicli).should_receive('install_dap_from_path')\\\n .and_raise(exceptions.DapiLocalError(self.exc_string)).at_least().once()\n\n with pytest.raises(exceptions.ExecutionException) as excinfo:\n actions.PkgInstallAction(package=[self.pkg], force=False,\n reinstall=False, nodeps=False).run()\n\n assert self.exc_string in str(excinfo.value)\n\n\nclass TestPkgUpdateAction(object):\n\n def test_pkg_update_all(self):\n '''Run update without args to update all, but everything is up to date'''\n flexmock(dapicli).should_receive('get_installed_daps')\\\n .and_return(['foo']).at_least().once()\n flexmock(dapicli).should_receive('install_dap')\\\n .and_return([]).at_least().once()\n\n # Update all, everything is up to date\n actions.PkgUpdateAction(force=False, allpaths=False).run()\n\n def test_pkg_update_no_dapi(self):\n '''Run update of package that is not on Dapi'''\n flexmock(dapicli).should_receive('metadap')\\\n .and_return(None).at_least().once()\n\n with pytest.raises(exceptions.ExecutionException) as excinfo:\n actions.PkgUpdateAction(package=['foo'], force=False, allpaths=False).run()\n\n assert 'foo not found' in str(excinfo.value)\n\n def test_pkg_update_no_installed(self):\n '''Run update of package that is not installed'''\n flexmock(dapicli).should_receive('_get_metadap_dap')\\\n .and_return(({}, {'version': '0.0.1'})).at_least().once()\n flexmock(dapicli).should_receive('get_installed_version_of')\\\n .and_return(None).at_least().once()\n\n with pytest.raises(exceptions.ExecutionException) as excinfo:\n actions.PkgUpdateAction(package=['foo'], force=False, allpaths=False).run()\n\n assert 'Cannot update not yet installed DAP' in str(excinfo.value)\n\n\n@pytest.mark.parametrize('action', [\n actions.PkgUninstallAction,\n actions.PkgRemoveAction\n])\nclass TestPkgUninstallAction(object):\n\n def test_pkg_uninstall_dependent(self, action):\n '''Uninstall two packages, but the first depends on the latter'''\n flexmock(dapicli).should_receive('uninstall_dap')\\\n .and_return(['first', 'second']).at_least().once()\n\n action(package=['first', 'second'],
force=True, allpaths=False).run()\n\n def test_pkg_uninstall_not_installed(self, action):\n '''Uninstall package that is not installed'''\n flexmock(dapicli).should_receive('get_installed_daps')\\\n .and_return(['bar']).at_least().once()\n\n with pytest.raises(exceptions.ExecutionException) as excinfo:\n action(package=['foo'], force=True, allpaths=False).run()\n\n assert 'Cannot uninstall DAP foo' in str(excinfo.value)\n\n\nclass TestAutoCompleteAction(object):\n\n def setup_class(self):\n self.aca = actions.AutoCompleteAction\n\n self.fake_desc = [flexmock(name=n,\n get_subassistants=lambda: [],\n args=[]) for n in ['foo', 'bar', 'baz']]\n self.fake_arg = flexmock(flags=('--qux',), kwargs=dict())\n self.fake_crt = flexmock(name='crt',\n get_subassistants=lambda: self.fake_desc,\n args=[self.fake_arg])\n\n @pytest.mark.parametrize('path', ['', '--debug', '__debug'])\n def test_root_path(self, path, capsys):\n expected = set(['--debug', '--help', 'create', 'doc', 'extra', 'help',\n 'pkg', 'prepare', 'tweak', 'version'])\n\n self.aca(path=path).run()\n stdout, _ = capsys.readouterr()\n\n assert stdout\n assert expected.issubset(set(stdout.split()))\n\n @pytest.mark.parametrize('obj', [\n flexmock(get_subassistants=lambda: []),\n flexmock(get_subactions=lambda: [])\n ])\n def test_get_descendants(self, obj):\n self.aca._get_descendants(obj)\n\n @pytest.mark.parametrize('obj', [\n flexmock(get_subassistants=''),\n flexmock()\n ])\n def test_get_descendants_fails(self, obj):\n with pytest.raises(TypeError):\n self.aca._get_descendants(obj)\n\n @pytest.mark.parametrize('path', ['crt', 'crt --qux'])\n def test_assistants(self, path, capsys):\n aca = self.aca(path=path)\n flexmock(aca).should_receive('_assistants').and_return([self.fake_crt])\n\n aca.run()\n\n stdout, _ = capsys.readouterr()\n\n assert not _\n assert set([a.name for a in self.fake_desc] + \\\n [f for f in self.fake_arg.flags]).issubset(set(stdout.split()))\n\n @pytest.mark.parametrize(('long_name', 'short_name'), [\n ('create', 'crt'),\n ('tweak', 'twk'),\n ('twk', 'mod'),\n ('prepare', 'prep'),\n ('extra', 'task'),\n ])\n def test_aliases(self, long_name, short_name, capsys):\n self.aca(path=long_name).run()\n long_stdout, _ = capsys.readouterr()\n\n assert long_stdout\n\n self.aca(path=short_name).run()\n short_stdout, _ = capsys.readouterr()\n\n assert short_stdout\n assert long_stdout == short_stdout\n\n def test_filenames(self, capsys):\n self.aca(path='pkg info').run()\n stdout, _ = capsys.readouterr()\n\n assert '_FILENAMES' in stdout.split()\n\n def test_bad_input(self, capsys):\n self.aca(path='foo bar baz').run()\n stdout, _ = capsys.readouterr()\n\n assert not stdout.split()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2740,"cells":{"repo_name":{"kind":"string","value":"jjingrong/PONUS-1.2"},"path":{"kind":"string","value":"venv/build/django/django/db/models/options.py"},"copies":{"kind":"string","value":"104"},"size":{"kind":"string","value":"24269"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nimport re\nfrom bisect import bisect\nimport warnings\n\nfrom django.conf import settings\nfrom django.db.models.fields.related import ManyToManyRel\nfrom django.db.models.fields import AutoField, FieldDoesNotExist\nfrom django.db.models.fields.proxy import OrderWrt\nfrom django.db.models.loading import get_models, app_cache_ready\nfrom django.utils import six\nfrom django.utils.functional import cached_property\nfrom django.utils.datastructures import SortedDict\nfrom 
django.utils.encoding import force_text, smart_text, python_2_unicode_compatible\nfrom django.utils.translation import activate, deactivate_all, get_language, string_concat\n\n# Calculate the verbose_name by converting from InitialCaps to \"lowercase with spaces\".\nget_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\\\1', class_name).lower().strip()\n\nDEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',\n 'unique_together', 'permissions', 'get_latest_by',\n 'order_with_respect_to', 'app_label', 'db_tablespace',\n 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',\n 'index_together', 'select_on_save')\n\n\n@python_2_unicode_compatible\nclass Options(object):\n def __init__(self, meta, app_label=None):\n self.local_fields, self.local_many_to_many = [], []\n self.virtual_fields = []\n self.model_name, self.verbose_name = None, None\n self.verbose_name_plural = None\n self.db_table = ''\n self.ordering = []\n self.unique_together = []\n self.index_together = []\n self.select_on_save = False\n self.permissions = []\n self.object_name, self.app_label = None, app_label\n self.get_latest_by = None\n self.order_with_respect_to = None\n self.db_tablespace = settings.DEFAULT_TABLESPACE\n self.meta = meta\n self.pk = None\n self.has_auto_field, self.auto_field = False, None\n self.abstract = False\n self.managed = True\n self.proxy = False\n # For any class that is a proxy (including automatically created\n # classes for deferred object loading), proxy_for_model tells us\n # which class this model is proxying. Note that proxy_for_model\n # can create a chain of proxy models. For non-proxy models, the\n # variable is always None.\n self.proxy_for_model = None\n # For any non-abstract class, the concrete class is the model\n # in the end of the proxy_for_model chain. In particular, for\n # concrete models, the concrete_model is always the class itself.\n self.concrete_model = None\n self.swappable = None\n self.parents = SortedDict()\n self.auto_created = False\n\n # To handle various inheritance situations, we need to track where\n # managers came from (concrete or abstract base classes).\n self.abstract_managers = []\n self.concrete_managers = []\n\n # List of all lookups defined in ForeignKey 'limit_choices_to' options\n # from *other* models. Needed for some admin checks. 
Internal use only.\n self.related_fkey_lookups = []\n\n def contribute_to_class(self, cls, name):\n from django.db import connection\n from django.db.backends.util import truncate_name\n\n cls._meta = self\n self.model = cls\n self.installed = re.sub('\\.models$', '', cls.__module__) in settings.INSTALLED_APPS\n # First, construct the default values for these options.\n self.object_name = cls.__name__\n self.model_name = self.object_name.lower()\n self.verbose_name = get_verbose_name(self.object_name)\n\n # Next, apply any overridden values from 'class Meta'.\n if self.meta:\n meta_attrs = self.meta.__dict__.copy()\n for name in self.meta.__dict__:\n # Ignore any private attributes that Django doesn't care about.\n # NOTE: We can't modify a dictionary's contents while looping\n # over it, so we loop over the *original* dictionary instead.\n if name.startswith('_'):\n del meta_attrs[name]\n for attr_name in DEFAULT_NAMES:\n if attr_name in meta_attrs:\n setattr(self, attr_name, meta_attrs.pop(attr_name))\n elif hasattr(self.meta, attr_name):\n setattr(self, attr_name, getattr(self.meta, attr_name))\n\n # unique_together can be either a tuple of tuples, or a single\n # tuple of two strings. Normalize it to a tuple of tuples, so that\n # calling code can uniformly expect that.\n ut = meta_attrs.pop('unique_together', self.unique_together)\n if ut and not isinstance(ut[0], (tuple, list)):\n ut = (ut,)\n self.unique_together = ut\n\n # verbose_name_plural is a special case because it uses a 's'\n # by default.\n if self.verbose_name_plural is None:\n self.verbose_name_plural = string_concat(self.verbose_name, 's')\n\n # Any leftover attributes must be invalid.\n if meta_attrs != {}:\n raise TypeError(\"'class Meta' got invalid attribute(s): %s\" % ','.join(meta_attrs.keys()))\n else:\n self.verbose_name_plural = string_concat(self.verbose_name, 's')\n del self.meta\n\n # If the db_table wasn't provided, use the app_label + model_name.\n if not self.db_table:\n self.db_table = \"%s_%s\" % (self.app_label, self.model_name)\n self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())\n\n @property\n def module_name(self):\n \"\"\"\n This property has been deprecated in favor of `model_name`. refs #19689\n \"\"\"\n warnings.warn(\n \"Options.module_name has been deprecated in favor of model_name\",\n PendingDeprecationWarning, stacklevel=2)\n return self.model_name\n\n def _prepare(self, model):\n if self.order_with_respect_to:\n self.order_with_respect_to = self.get_field(self.order_with_respect_to)\n self.ordering = ('_order',)\n model.add_to_class('_order', OrderWrt())\n else:\n self.order_with_respect_to = None\n\n if self.pk is None:\n if self.parents:\n # Promote the first parent link in lieu of adding yet another\n # field.\n field = next(six.itervalues(self.parents))\n # Look for a local field with the same name as the\n # first parent link. 
If a local field has already been\n # created, use it instead of promoting the parent\n already_created = [fld for fld in self.local_fields if fld.name == field.name]\n if already_created:\n field = already_created[0]\n field.primary_key = True\n self.setup_pk(field)\n else:\n auto = AutoField(verbose_name='ID', primary_key=True,\n auto_created=True)\n model.add_to_class('id', auto)\n\n def add_field(self, field):\n # Insert the given field in the order in which it was created, using\n # the \"creation_counter\" attribute of the field.\n # Move many-to-many related fields from self.fields into\n # self.many_to_many.\n if field.rel and isinstance(field.rel, ManyToManyRel):\n self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)\n if hasattr(self, '_m2m_cache'):\n del self._m2m_cache\n else:\n self.local_fields.insert(bisect(self.local_fields, field), field)\n self.setup_pk(field)\n if hasattr(self, '_field_cache'):\n del self._field_cache\n del self._field_name_cache\n # The fields, concrete_fields and local_concrete_fields are\n # implemented as cached properties for performance reasons.\n # The attrs will not exists if the cached property isn't\n # accessed yet, hence the try-excepts.\n try:\n del self.fields\n except AttributeError:\n pass\n try:\n del self.concrete_fields\n except AttributeError:\n pass\n try:\n del self.local_concrete_fields\n except AttributeError:\n pass\n\n if hasattr(self, '_name_map'):\n del self._name_map\n\n def add_virtual_field(self, field):\n self.virtual_fields.append(field)\n\n def setup_pk(self, field):\n if not self.pk and field.primary_key:\n self.pk = field\n field.serialize = False\n\n def pk_index(self):\n \"\"\"\n Returns the index of the primary key field in the self.concrete_fields\n list.\n \"\"\"\n return self.concrete_fields.index(self.pk)\n\n def setup_proxy(self, target):\n \"\"\"\n Does the internal setup so that the current model is a proxy for\n \"target\".\n \"\"\"\n self.pk = target._meta.pk\n self.proxy_for_model = target\n self.db_table = target._meta.db_table\n\n def __repr__(self):\n return '' % self.object_name\n\n def __str__(self):\n return \"%s.%s\" % (smart_text(self.app_label), smart_text(self.model_name))\n\n def verbose_name_raw(self):\n \"\"\"\n There are a few places where the untranslated verbose name is needed\n (so that we get the same value regardless of currently active\n locale).\n \"\"\"\n lang = get_language()\n deactivate_all()\n raw = force_text(self.verbose_name)\n activate(lang)\n return raw\n verbose_name_raw = property(verbose_name_raw)\n\n def _swapped(self):\n \"\"\"\n Has this model been swapped out for another? 
If so, return the model\n name of the replacement; otherwise, return None.\n\n For historical reasons, model name lookups using get_model() are\n case insensitive, so we make sure we are case insensitive here.\n \"\"\"\n if self.swappable:\n model_label = '%s.%s' % (self.app_label, self.model_name)\n swapped_for = getattr(settings, self.swappable, None)\n if swapped_for:\n try:\n swapped_label, swapped_object = swapped_for.split('.')\n except ValueError:\n # setting not in the format app_label.model_name\n # raising ImproperlyConfigured here causes problems with\n # test cleanup code - instead it is raised in get_user_model\n # or as part of validation.\n return swapped_for\n\n if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):\n return swapped_for\n return None\n swapped = property(_swapped)\n\n @cached_property\n def fields(self):\n \"\"\"\n The getter for self.fields. This returns the list of field objects\n available to this model (including through parent models).\n\n Callers are not permitted to modify this list, since it's a reference\n to this instance (not a copy).\n \"\"\"\n try:\n self._field_name_cache\n except AttributeError:\n self._fill_fields_cache()\n return self._field_name_cache\n\n @cached_property\n def concrete_fields(self):\n return [f for f in self.fields if f.column is not None]\n\n @cached_property\n def local_concrete_fields(self):\n return [f for f in self.local_fields if f.column is not None]\n\n def get_fields_with_model(self):\n \"\"\"\n Returns a sequence of (field, model) pairs for all fields. The \"model\"\n element is None for fields on the current model. Mostly of use when\n constructing queries so that we know which model a field belongs to.\n \"\"\"\n try:\n self._field_cache\n except AttributeError:\n self._fill_fields_cache()\n return self._field_cache\n\n def get_concrete_fields_with_model(self):\n return [(field, model) for field, model in self.get_fields_with_model() if\n field.column is not None]\n\n def _fill_fields_cache(self):\n cache = []\n for parent in self.parents:\n for field, model in parent._meta.get_fields_with_model():\n if model:\n cache.append((field, model))\n else:\n cache.append((field, parent))\n cache.extend([(f, None) for f in self.local_fields])\n self._field_cache = tuple(cache)\n self._field_name_cache = [x for x, _ in cache]\n\n def _many_to_many(self):\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n return list(self._m2m_cache)\n many_to_many = property(_many_to_many)\n\n def get_m2m_with_model(self):\n \"\"\"\n The many-to-many version of get_fields_with_model().\n \"\"\"\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n return list(six.iteritems(self._m2m_cache))\n\n def _fill_m2m_cache(self):\n cache = SortedDict()\n for parent in self.parents:\n for field, model in parent._meta.get_m2m_with_model():\n if model:\n cache[field] = model\n else:\n cache[field] = parent\n for field in self.local_many_to_many:\n cache[field] = None\n self._m2m_cache = cache\n\n def get_field(self, name, many_to_many=True):\n \"\"\"\n Returns the requested field by name. 
Raises FieldDoesNotExist on error.\n \"\"\"\n to_search = (self.fields + self.many_to_many) if many_to_many else self.fields\n for f in to_search:\n if f.name == name:\n return f\n raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))\n\n def get_field_by_name(self, name):\n \"\"\"\n Returns the (field_object, model, direct, m2m), where field_object is\n the Field instance for the given name, model is the model containing\n this field (None for local fields), direct is True if the field exists\n on this model, and m2m is True for many-to-many relations. When\n 'direct' is False, 'field_object' is the corresponding RelatedObject\n for this field (since the field doesn't have an instance associated\n with it).\n\n Uses a cache internally, so after the first access, this is very fast.\n \"\"\"\n try:\n try:\n return self._name_map[name]\n except AttributeError:\n cache = self.init_name_map()\n return cache[name]\n except KeyError:\n raise FieldDoesNotExist('%s has no field named %r'\n % (self.object_name, name))\n\n def get_all_field_names(self):\n \"\"\"\n Returns a list of all field names that are possible for this model\n (including reverse relation names). This is used for pretty printing\n debugging output (a list of choices), so any internal-only field names\n are not included.\n \"\"\"\n try:\n cache = self._name_map\n except AttributeError:\n cache = self.init_name_map()\n names = sorted(cache.keys())\n # Internal-only names end with \"+\" (symmetrical m2m related names being\n # the main example). Trim them.\n return [val for val in names if not val.endswith('+')]\n\n def init_name_map(self):\n \"\"\"\n Initialises the field name -> field object mapping.\n \"\"\"\n cache = {}\n # We intentionally handle related m2m objects first so that symmetrical\n # m2m accessor names can be overridden, if necessary.\n for f, model in self.get_all_related_m2m_objects_with_model():\n cache[f.field.related_query_name()] = (f, model, False, True)\n for f, model in self.get_all_related_objects_with_model():\n cache[f.field.related_query_name()] = (f, model, False, False)\n for f, model in self.get_m2m_with_model():\n cache[f.name] = (f, model, True, True)\n for f, model in self.get_fields_with_model():\n cache[f.name] = (f, model, True, False)\n for f in self.virtual_fields:\n if hasattr(f, 'related'):\n cache[f.name] = (f.related, None if f.model == self.model else f.model, True, False)\n if app_cache_ready():\n self._name_map = cache\n return cache\n\n def get_add_permission(self):\n \"\"\"\n This method has been deprecated in favor of\n `django.contrib.auth.get_permission_codename`. refs #20642\n \"\"\"\n warnings.warn(\n \"`Options.get_add_permission` has been deprecated in favor \"\n \"of `django.contrib.auth.get_permission_codename`.\",\n PendingDeprecationWarning, stacklevel=2)\n return 'add_%s' % self.model_name\n\n def get_change_permission(self):\n \"\"\"\n This method has been deprecated in favor of\n `django.contrib.auth.get_permission_codename`. refs #20642\n \"\"\"\n warnings.warn(\n \"`Options.get_change_permission` has been deprecated in favor \"\n \"of `django.contrib.auth.get_permission_codename`.\",\n PendingDeprecationWarning, stacklevel=2)\n return 'change_%s' % self.model_name\n\n def get_delete_permission(self):\n \"\"\"\n This method has been deprecated in favor of\n `django.contrib.auth.get_permission_codename`. 
refs #20642\n \"\"\"\n warnings.warn(\n \"`Options.get_delete_permission` has been deprecated in favor \"\n \"of `django.contrib.auth.get_permission_codename`.\",\n PendingDeprecationWarning, stacklevel=2)\n return 'delete_%s' % self.model_name\n\n def get_all_related_objects(self, local_only=False, include_hidden=False,\n include_proxy_eq=False):\n return [k for k, v in self.get_all_related_objects_with_model(\n local_only=local_only, include_hidden=include_hidden,\n include_proxy_eq=include_proxy_eq)]\n\n def get_all_related_objects_with_model(self, local_only=False,\n include_hidden=False,\n include_proxy_eq=False):\n \"\"\"\n Returns a list of (related-object, model) pairs. Similar to\n get_fields_with_model().\n \"\"\"\n try:\n self._related_objects_cache\n except AttributeError:\n self._fill_related_objects_cache()\n predicates = []\n if local_only:\n predicates.append(lambda k, v: not v)\n if not include_hidden:\n predicates.append(lambda k, v: not k.field.rel.is_hidden())\n cache = (self._related_objects_proxy_cache if include_proxy_eq\n else self._related_objects_cache)\n return [t for t in cache.items() if all(p(*t) for p in predicates)]\n\n def _fill_related_objects_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):\n if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n # Collect also objects which are in relation to some proxy child/parent of self.\n proxy_cache = cache.copy()\n for klass in get_models(include_auto_created=True, only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_fields:\n if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation:\n if self == f.rel.to._meta:\n cache[f.related] = None\n proxy_cache[f.related] = None\n elif self.concrete_model == f.rel.to._meta.concrete_model:\n proxy_cache[f.related] = None\n self._related_objects_cache = cache\n self._related_objects_proxy_cache = proxy_cache\n\n def get_all_related_many_to_many_objects(self, local_only=False):\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n if local_only:\n return [k for k, v in cache.items() if not v]\n return list(cache)\n\n def get_all_related_m2m_objects_with_model(self):\n \"\"\"\n Returns a list of (related-m2m-object, model) pairs. 
Similar to\n get_fields_with_model().\n \"\"\"\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n return list(six.iteritems(cache))\n\n def _fill_related_many_to_many_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_m2m_objects_with_model():\n if obj.field.creation_counter < 0 and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n for klass in get_models(only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_many_to_many:\n if (f.rel\n and not isinstance(f.rel.to, six.string_types)\n and self == f.rel.to._meta):\n cache[f.related] = None\n if app_cache_ready():\n self._related_many_to_many_cache = cache\n return cache\n\n def get_base_chain(self, model):\n \"\"\"\n Returns a list of parent classes leading to 'model' (order from closest\n to most distant ancestor). This has to handle the case where 'model' is\n a grandparent or even more distant relation.\n \"\"\"\n if not self.parents:\n return None\n if model in self.parents:\n return [model]\n for parent in self.parents:\n res = parent._meta.get_base_chain(model)\n if res:\n res.insert(0, parent)\n return res\n return None\n\n def get_parent_list(self):\n \"\"\"\n Returns a list of all the ancestors of this model. Useful for\n determining if something is an ancestor, regardless of lineage.\n \"\"\"\n result = set()\n for parent in self.parents:\n result.add(parent)\n result.update(parent._meta.get_parent_list())\n return result\n
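\n # Worked example (hypothetical models): with multi-table inheritance\n # Cafe -> Restaurant -> Place, Cafe._meta.get_base_chain(Place) walks\n # self.parents recursively and returns [Restaurant, Place] (closest\n # ancestor first), while Cafe._meta.get_parent_list() returns the\n # unordered set {Restaurant, Place}.\n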
Used when\n constructing table joins for model inheritance.\n\n Returns None if the model isn't an ancestor of this one.\n \"\"\"\n if ancestor in self.parents:\n return self.parents[ancestor]\n for parent in self.parents:\n # Tries to get a link field from the immediate parent\n parent_link = parent._meta.get_ancestor_link(ancestor)\n if parent_link:\n # In case of a proxied model, the first link\n # of the chain to the ancestor is that parent\n # links\n return self.parents[parent] or parent_link\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2741,"cells":{"repo_name":{"kind":"string","value":"jwalgran/otm-core"},"path":{"kind":"string","value":"opentreemap/treemap/lib/user.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"8019"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom django.db.models import Q\n\nfrom treemap.audit import Audit, Authorizable, get_auditable_class\nfrom treemap.models import Instance, MapFeature, InstanceUser, User\nfrom treemap.util import get_filterable_audit_models\nfrom treemap.lib.object_caches import udf_defs\nfrom treemap.udf import UDFModel\n\n\ndef _instance_ids_edited_by(user):\n return Audit.objects.filter(user=user)\\\n .values_list('instance_id', flat=True)\\\n .exclude(instance_id=None)\\\n .distinct()\n\n\nPAGE_DEFAULT = 20\nALLOWED_MODELS = get_filterable_audit_models()\n\n\ndef get_audits(logged_in_user, instance, query_vars, user=None,\n models=ALLOWED_MODELS, model_id=None, start_id=None,\n prev_start_ids=[], page_size=PAGE_DEFAULT, exclude_pending=True,\n should_count=False):\n if instance:\n if instance.is_accessible_by(logged_in_user):\n instances = Instance.objects.filter(pk=instance.pk)\n else:\n instances = Instance.objects.none()\n # If we didn't specify an instance we only want to\n # show audits where the user has permission\n else:\n instances = Instance.objects\\\n .filter(user_accessible_instance_filter(logged_in_user))\n if user:\n instances = instances.filter(pk__in=_instance_ids_edited_by(user))\n instances = instances.distinct()\n\n if not instances.exists():\n # Force no results\n return {'audits': Audit.objects.none(),\n 'total_count': 0,\n 'next_page': None,\n 'prev_page': None}\n\n map_feature_models = set(MapFeature.subclass_dict().keys())\n model_filter = Q()\n # We only want to show the TreePhoto's image, not other fields\n # and we want to do it automatically if 'Tree' was specified as\n # a model. 
The same goes for MapFeature(s) <-> MapFeaturePhoto\n # There is no need to check permissions, because photos are always visible\n if 'Tree' in models:\n model_filter = model_filter | Q(model='TreePhoto', field='image')\n if map_feature_models.intersection(models):\n model_filter = model_filter | Q(model='MapFeaturePhoto', field='image')\n\n for inst in instances:\n eligible_models = ({'Tree', 'TreePhoto', 'MapFeaturePhoto'} |\n set(inst.map_feature_types)) & set(models)\n\n if logged_in_user == user:\n eligible_udfs = {'udf:%s' % udf.id for udf in udf_defs(inst)\n if udf.model_type in eligible_models\n and udf.iscollection}\n\n # The logged-in user can see all their own edits\n model_filter = model_filter | Q(\n instance=inst, model__in=(eligible_models | eligible_udfs))\n\n else:\n # Filter other users' edits by their visibility to the\n # logged-in user\n for model in eligible_models:\n ModelClass = get_auditable_class(model)\n fake_model = ModelClass(instance=inst)\n if issubclass(ModelClass, Authorizable):\n visible_fields = fake_model.visible_fields(logged_in_user)\n model_filter = model_filter |\\\n Q(model=model, field__in=visible_fields, instance=inst)\n else:\n model_filter = model_filter | Q(model=model, instance=inst)\n\n if issubclass(ModelClass, UDFModel):\n model_collection_udfs_audit_names = (\n fake_model.visible_collection_udfs_audit_names(\n logged_in_user))\n\n model_filter = model_filter | (\n Q(model__in=model_collection_udfs_audit_names))\n\n udf_bookkeeping_fields = Q(\n model__startswith='udf:',\n field__in=('id', 'model_id', 'field_definition'))\n\n audits = (Audit.objects\n .filter(model_filter)\n .filter(instance__in=instances)\n .select_related('instance')\n .exclude(udf_bookkeeping_fields)\n .exclude(user=User.system_user())\n .order_by('-pk'))\n\n if user:\n audits = audits.filter(user=user)\n if model_id:\n audits = audits.filter(model_id=model_id)\n if exclude_pending:\n audits = audits.exclude(requires_auth=True, ref__isnull=True)\n\n # Slicing the QuerySet uses a SQL Limit, which has proven to be quite slow.\n # By relying on the fact that our list is ordered by primary key from newest\n # to oldest, we can rely on the index on the primary key, which is faster.\n if start_id is not None:\n audits = audits.filter(pk__lte=start_id)\n\n total_count = audits.count() if should_count else 0\n audits = audits[:page_size]\n\n # Coerce the queryset into a list so we can get the last audit row on the\n # current page\n audits = list(audits)\n\n # We are using len(audits) instead of audits.count() because we\n # have already realized the queryset at this point\n if len(audits) == page_size:\n query_vars.setlist('prev', prev_start_ids + [audits[0].pk])\n query_vars['start'] = audits[-1].pk - 1\n next_page = "?" + query_vars.urlencode()\n else:\n next_page = None\n\n if prev_start_ids:\n if len(prev_start_ids) == 1:\n del query_vars['prev']\n del query_vars['start']\n else:\n prev_start_id = prev_start_ids.pop()\n query_vars.setlist('prev', prev_start_ids)\n query_vars['start'] = prev_start_id\n prev_page = "?" + query_vars.urlencode()\n else:\n prev_page = None\n\n return {'audits': audits,\n 'total_count': total_count,\n 'next_page': next_page,\n 'prev_page': prev_page}\n\n\ndef get_audits_params(request):\n PAGE_MAX = 100\n\n r = request.GET\n\n page_size = min(int(r.get('page_size', PAGE_DEFAULT)), PAGE_MAX)\n start_id = r.get('start', None)\n if start_id is not None:\n start_id = int(start_id)\n\n prev_start_ids = [int(pk) for pk in r.getlist('prev')]\n\n 
models = r.getlist('models', default=ALLOWED_MODELS)\n\n if models:\n for model in models:\n if model not in ALLOWED_MODELS:\n raise Exception(\"Invalid model: %s\" % model)\n\n model_id = r.get('model_id', None)\n\n if model_id is not None and len(models) != 1:\n raise Exception(\"You must specify one and only one model \"\n \"when looking up by id\")\n\n exclude_pending = r.get('exclude_pending', \"false\") == \"true\"\n\n return {'start_id': start_id, 'prev_start_ids': prev_start_ids,\n 'page_size': page_size, 'models': models, 'model_id': model_id,\n 'exclude_pending': exclude_pending}\n\n\ndef user_accessible_instance_filter(logged_in_user):\n public = Q(is_public=True)\n if logged_in_user is not None and not logged_in_user.is_anonymous():\n private_with_access = Q(instanceuser__user=logged_in_user)\n\n instance_filter = public | private_with_access\n else:\n instance_filter = public\n return instance_filter\n\n\ndef get_user_instances(logged_in_user, user, current_instance=None):\n\n # Which instances can the logged-in user see?\n instance_filter = (user_accessible_instance_filter(logged_in_user))\n\n user_instance_ids = (InstanceUser.objects\n .filter(user_id=user.pk)\n .values_list('instance_id', flat=True))\n\n instance_filter = Q(instance_filter, Q(pk__in=user_instance_ids))\n\n # The logged-in user should see the current instance in their own list\n if current_instance and logged_in_user == user:\n instance_filter = instance_filter | Q(pk=current_instance.id)\n\n return (Instance.objects\n .filter(instance_filter)\n .distinct()\n .order_by('name'))\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2742,"cells":{"repo_name":{"kind":"string","value":"kienpham2000/ansible-modules-core"},"path":{"kind":"string","value":"packaging/rpm_key.py"},"copies":{"kind":"string","value":"60"},"size":{"kind":"string","value":"7339"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Ansible module to import third party repo keys to your rpm db\n# (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rpm_key\nauthor: Hector Acosta <hector.acosta@gazzang.com>\nshort_description: Adds or removes a gpg key from the rpm db\ndescription:\n - Adds or removes (rpm --import) a gpg key to or from your rpm database.\nversion_added: \"1.3\"\noptions:\n key:\n required: true\n default: null\n aliases: []\n description:\n - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.\n state:\n required: false\n default: \"present\"\n choices: [present, absent]\n description:\n - Whether the key will be imported or removed from the rpm db.\n validate_certs:\n description:\n - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n\n'''\n\nEXAMPLES = '''\n# Example action to import a key from a url\n- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt\n\n# Example action to import a key from a file\n- rpm_key: state=present key=/path/to/key.gpg\n\n# Example action to ensure a key is not present in the db\n- rpm_key: state=absent key=DEADB33F\n'''\nimport syslog\nimport os.path\nimport re\nimport tempfile\n\ndef is_pubkey(string):\n \"\"\"Verifies if string is a pubkey\"\"\"\n pgp_regex = \".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*\"\n return re.match(pgp_regex, string, re.DOTALL)\n\nclass RpmKey:\n\n def __init__(self, module):\n self.syslogging = False\n # If the key is a url, we need to check if it's present to be idempotent,\n # to do that, we need to check the keyid, which we can get from the armor.\n keyfile = None\n should_cleanup_keyfile = False\n self.module = module\n self.rpm = self.module.get_bin_path('rpm', True)\n state = module.params['state']\n key = module.params['key']\n\n if '://' in key:\n keyfile = self.fetch_key(key)\n keyid = self.getkeyid(keyfile)\n should_cleanup_keyfile = True\n elif self.is_keyid(key):\n keyid = key\n elif os.path.isfile(key):\n keyfile = key\n keyid = self.getkeyid(keyfile)\n else:\n self.module.fail_json(msg=\"Not a valid key %s\" % key)\n keyid = self.normalize_keyid(keyid)\n\n if state == 'present':\n if self.is_key_imported(keyid):\n module.exit_json(changed=False)\n else:\n if not keyfile:\n self.module.fail_json(msg=\"When importing a key, a valid file must be given\")\n self.import_key(keyfile, dryrun=module.check_mode)\n if should_cleanup_keyfile:\n self.module.cleanup(keyfile)\n module.exit_json(changed=True)\n else:\n if self.is_key_imported(keyid):\n self.drop_key(keyid, dryrun=module.check_mode)\n module.exit_json(changed=True)\n else:\n module.exit_json(changed=False)\n\n\n def fetch_key(self, url):\n \"\"\"Downloads a key from url, returns a valid path to a gpg key\"\"\"\n try:\n rsp, info = fetch_url(self.module, url)\n key = rsp.read()\n if not is_pubkey(key):\n self.module.fail_json(msg=\"Not a public key: %s\" % url)\n tmpfd, tmpname = tempfile.mkstemp()\n tmpfile = os.fdopen(tmpfd, \"w+b\")\n tmpfile.write(key)\n tmpfile.close()\n return tmpname\n except urllib2.URLError, e:\n self.module.fail_json(msg=str(e))\n\n def normalize_keyid(self, keyid):\n \"\"\"Ensure a keyid doesn't have a leading 0x or leading/trailing whitespace, and is lowercase\"\"\"\n ret = keyid.strip().lower()\n if ret.startswith('0x'):\n return ret[2:]\n elif ret.startswith('0X'):\n return ret[2:]\n else:\n return ret\n\n def getkeyid(self, keyfile):\n gpg = self.module.get_bin_path('gpg', True)\n stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])\n for line in stdout.splitlines():\n line = line.strip()\n if line.startswith(':signature packet:'):\n # We want just the last 8 characters of the keyid\n keyid = line.split()[-1].strip()[8:]\n return keyid\n self.module.fail_json(msg=\"Unexpected gpg output\")\n\n def is_keyid(self, keystr):\n \"\"\"Verifies if a key, as provided by the user, is a keyid\"\"\"\n return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)\n\n def execute_command(self, cmd):\n if self.syslogging:\n syslog.openlog('ansible-%s' % os.path.basename(__file__))\n 
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))\n rc, stdout, stderr = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg=stderr)\n return stdout, stderr\n\n def is_key_imported(self, keyid):\n stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])\n for line in stdout.splitlines():\n line = line.strip()\n if not line:\n continue\n match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line)\n if not match:\n self.module.fail_json(msg=\"rpm returned unexpected output [%s]\" % line)\n else:\n if keyid == match.group(1):\n return True\n return False\n\n def import_key(self, keyfile, dryrun=False):\n if not dryrun:\n self.execute_command([self.rpm, '--import', keyfile])\n\n def drop_key(self, key, dryrun=False):\n if not dryrun:\n self.execute_command([self.rpm, '--erase', '--allmatches', \"gpg-pubkey-%s\" % key])\n\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n key=dict(required=True, type='str'),\n validate_certs=dict(default='yes', type='bool'),\n ),\n supports_check_mode=True\n )\n\n RpmKey(module)\n\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nmain()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2743,"cells":{"repo_name":{"kind":"string","value":"bartQu9/fallenmua"},"path":{"kind":"string","value":"resolvers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3740"},"content":{"kind":"string","value":"from urllib.error import URLError, HTTPError\nfrom xml.dom import minidom\nfrom dns import resolver\nimport urllib.request\nimport logging\n\n\ndef parse_thunderbird_autoconfig(xml_autoconfig):\n mx_servers = []\n\n dom_tree = minidom.parseString(xml_autoconfig)\n c_nodes = dom_tree.childNodes\n\n for i in c_nodes[0].getElementsByTagName(\"outgoingServer\"):\n try:\n curr_hostname = i.getElementsByTagName(\"hostname\")[0].childNodes[0].toxml().lower()\n curr_port = int(i.getElementsByTagName(\"port\")[0].childNodes[0].toxml())\n curr_sock_type = i.getElementsByTagName(\"socketType\")[0].childNodes[0].toxml().lower()\n curr_username_type = i.getElementsByTagName(\"username\")[0].childNodes[0].toxml()\n curr_auth_method = i.getElementsByTagName(\"authentication\")[0].childNodes[0].toxml().lower()\n except IndexError:\n logging.error(\"Bad autoconfiguration file in ISPDB\")\n return None\n\n mx_servers.append({'hostname': curr_hostname, 'port': curr_port, 'sock_type': curr_sock_type,\n 'username_type': curr_username_type, 'auth_method': curr_auth_method})\n\n if mx_servers:\n return mx_servers\n else:\n return None\n\n\ndef get_mx_from_ispdb(domain, _timeout=2):\n \"\"\"\n Search for MX servers in Mozilla ISPDB.\n :param _timeout: resource connection timeout\n :param domain: a str FQDN\n :return: List of tuples consists of mx server and listening port\n \"\"\"\n try:\n logging.debug(\"Connecting to the Mozilla ISPDB\")\n xml_config = urllib.request.urlopen(\"https://autoconfig.thunderbird.net/autoconfig/v1.1/{0}\".\n format(domain), timeout=_timeout).read()\n logging.debug(\"Fetched autoconfigure XML file from Mozilla ISPDB\")\n except HTTPError:\n logging.info(\"No data for domain {0} in the Mozilla ISPDB\".format(domain))\n return None\n except URLError as err:\n logging.warning(\"Unable to connect with the Mozilla ISPDB, reason: {0}\".format(err))\n return None\n\n mx_servers = parse_thunderbird_autoconfig(xml_config)\n hostnames = 
[mx['hostname'] for mx in mx_servers]\n\n logging.debug(\"MX servers from Mozilla ISPDB: {0}\".format(hostnames))\n\n return mx_servers\n\n\ndef get_mx_from_isp(domain, _timeout=4):\n try:\n logging.debug(\"Connecting to the ISP autoconfig\")\n xml_config = urllib.request.urlopen(\"http://autoconfig.{0}/mail/config-v1.1.xml\".format(domain),\n timeout=_timeout).read()\n logging.debug(\"Fetched autoconfigure XML file from autoconfig.{0}/mail/config-v1.1.xml\".format(domain))\n except (HTTPError, URLError):\n logging.info(\"No data on autoconfig.{0}\".format(domain))\n return None\n\n mx_servers = parse_thunderbird_autoconfig(xml_config)\n hostnames = [mx['hostname'] for mx in mx_servers]\n\n logging.debug(\"MX servers from autoconfig.{0}: {1}\".format(domain, hostnames))\n\n return mx_servers\n\n\ndef get_mx_from_dns(domain):\n mx_servers = []\n\n try:\n _tmp_mx = []\n for mx in resolver.query(domain, \"MX\"):\n _tmp_mx.append(mx.to_text().split(\" \"))\n logging.info(\"Found {0} MX servers in DNS zone\".format(len(_tmp_mx)))\n _tmp_mx.sort() # sort MX's by priority\n\n except resolver.NXDOMAIN:\n logging.error(\"Cannot resolve domain name {0}\".format(domain))\n return None\n\n for mx in _tmp_mx:\n for port in (587, 465, 25): # Adding commonly known SMTP ports\n mx_servers.append({'hostname': mx[1], 'port': port, 'sock_type': None, 'username_type': None,\n 'auth_method': None})\n\n return mx_servers\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2744,"cells":{"repo_name":{"kind":"string","value":"AnimeshSinha1309/WebsiteEdunet"},"path":{"kind":"string","value":"WebsiteEdunet/env/Lib/site-packages/django/shortcuts.py"},"copies":{"kind":"string","value":"135"},"size":{"kind":"string","value":"7957"},"content":{"kind":"string","value":"\"\"\"\nThis module collects helper functions and classes that \"span\" multiple levels\nof MVC. 
In other words, these functions/classes introduce controlled coupling\nfor convenience's sake.\n\"\"\"\n\nimport warnings\n\nfrom django.core import urlresolvers\nfrom django.db.models.base import ModelBase\nfrom django.db.models.manager import Manager\nfrom django.db.models.query import QuerySet\nfrom django.http import (\n Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,\n)\nfrom django.template import RequestContext, loader\nfrom django.template.context import _current_app_undefined\nfrom django.template.engine import (\n _context_instance_undefined, _dictionary_undefined, _dirs_undefined,\n)\nfrom django.utils import six\nfrom django.utils.deprecation import RemovedInDjango110Warning\nfrom django.utils.encoding import force_text\nfrom django.utils.functional import Promise\n\n\ndef render_to_response(template_name, context=None,\n context_instance=_context_instance_undefined,\n content_type=None, status=None, dirs=_dirs_undefined,\n dictionary=_dictionary_undefined, using=None):\n \"\"\"\n Returns a HttpResponse whose content is filled with the result of calling\n django.template.loader.render_to_string() with the passed arguments.\n \"\"\"\n if (context_instance is _context_instance_undefined\n and dirs is _dirs_undefined\n and dictionary is _dictionary_undefined):\n # No deprecated arguments were passed - use the new code path\n content = loader.render_to_string(template_name, context, using=using)\n\n else:\n # Some deprecated arguments were passed - use the legacy code path\n content = loader.render_to_string(\n template_name, context, context_instance, dirs, dictionary,\n using=using)\n\n return HttpResponse(content, content_type, status)\n\n\ndef render(request, template_name, context=None,\n context_instance=_context_instance_undefined,\n content_type=None, status=None, current_app=_current_app_undefined,\n dirs=_dirs_undefined, dictionary=_dictionary_undefined,\n using=None):\n \"\"\"\n Returns a HttpResponse whose content is filled with the result of calling\n django.template.loader.render_to_string() with the passed arguments.\n Uses a RequestContext by default.\n \"\"\"\n if (context_instance is _context_instance_undefined\n and current_app is _current_app_undefined\n and dirs is _dirs_undefined\n and dictionary is _dictionary_undefined):\n # No deprecated arguments were passed - use the new code path\n # In Django 1.10, request should become a positional argument.\n content = loader.render_to_string(\n template_name, context, request=request, using=using)\n\n else:\n # Some deprecated arguments were passed - use the legacy code path\n if context_instance is not _context_instance_undefined:\n if current_app is not _current_app_undefined:\n raise ValueError('If you provide a context_instance you must '\n 'set its current_app before calling render()')\n else:\n context_instance = RequestContext(request)\n if current_app is not _current_app_undefined:\n warnings.warn(\n \"The current_app argument of render is deprecated. 
\"\n \"Set the current_app attribute of request instead.\",\n RemovedInDjango110Warning, stacklevel=2)\n request.current_app = current_app\n # Directly set the private attribute to avoid triggering the\n # warning in RequestContext.__init__.\n context_instance._current_app = current_app\n\n content = loader.render_to_string(\n template_name, context, context_instance, dirs, dictionary,\n using=using)\n\n return HttpResponse(content, content_type, status)\n\n\ndef redirect(to, *args, **kwargs):\n \"\"\"\n Returns an HttpResponseRedirect to the appropriate URL for the arguments\n passed.\n\n The arguments could be:\n\n * A model: the model's `get_absolute_url()` function will be called.\n\n * A view name, possibly with arguments: `urlresolvers.reverse()` will\n be used to reverse-resolve the name.\n\n * A URL, which will be used as-is for the redirect location.\n\n By default issues a temporary redirect; pass permanent=True to issue a\n permanent redirect\n \"\"\"\n if kwargs.pop('permanent', False):\n redirect_class = HttpResponsePermanentRedirect\n else:\n redirect_class = HttpResponseRedirect\n\n return redirect_class(resolve_url(to, *args, **kwargs))\n\n\ndef _get_queryset(klass):\n \"\"\"\n Returns a QuerySet from a Model, Manager, or QuerySet. Created to make\n get_object_or_404 and get_list_or_404 more DRY.\n\n Raises a ValueError if klass is not a Model, Manager, or QuerySet.\n \"\"\"\n if isinstance(klass, QuerySet):\n return klass\n elif isinstance(klass, Manager):\n manager = klass\n elif isinstance(klass, ModelBase):\n manager = klass._default_manager\n else:\n if isinstance(klass, type):\n klass__name = klass.__name__\n else:\n klass__name = klass.__class__.__name__\n raise ValueError(\"Object is of type '%s', but must be a Django Model, \"\n \"Manager, or QuerySet\" % klass__name)\n return manager.all()\n\n\ndef get_object_or_404(klass, *args, **kwargs):\n \"\"\"\n Uses get() to return an object, or raises a Http404 exception if the object\n does not exist.\n\n klass may be a Model, Manager, or QuerySet object. All other passed\n arguments and keyword arguments are used in the get() query.\n\n Note: Like with get(), an MultipleObjectsReturned will be raised if more than one\n object is found.\n \"\"\"\n queryset = _get_queryset(klass)\n try:\n return queryset.get(*args, **kwargs)\n except queryset.model.DoesNotExist:\n raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)\n\n\ndef get_list_or_404(klass, *args, **kwargs):\n \"\"\"\n Uses filter() to return a list of objects, or raise a Http404 exception if\n the list is empty.\n\n klass may be a Model, Manager, or QuerySet object. All other passed\n arguments and keyword arguments are used in the filter() query.\n \"\"\"\n queryset = _get_queryset(klass)\n obj_list = list(queryset.filter(*args, **kwargs))\n if not obj_list:\n raise Http404('No %s matches the given query.' 
% queryset.model._meta.object_name)\n return obj_list\n\n\ndef resolve_url(to, *args, **kwargs):\n \"\"\"\n Return a URL appropriate for the arguments passed.\n\n The arguments could be:\n\n * A model: the model's `get_absolute_url()` function will be called.\n\n * A view name, possibly with arguments: `urlresolvers.reverse()` will\n be used to reverse-resolve the name.\n\n * A URL, which will be returned as-is.\n \"\"\"\n # If it's a model, use get_absolute_url()\n if hasattr(to, 'get_absolute_url'):\n return to.get_absolute_url()\n\n if isinstance(to, Promise):\n # Expand the lazy instance, as it can cause issues when it is passed\n # further to some Python functions like urlparse.\n to = force_text(to)\n\n if isinstance(to, six.string_types):\n # Handle relative URLs\n if to.startswith(('./', '../')):\n return to\n\n # Next try a reverse URL resolution.\n try:\n return urlresolvers.reverse(to, args=args, kwargs=kwargs)\n except urlresolvers.NoReverseMatch:\n # If this is a callable, re-raise.\n if callable(to):\n raise\n # If this doesn't \"feel\" like a URL, re-raise.\n if '/' not in to and '.' not in to:\n raise\n\n # Finally, fall back and assume it's a URL\n return to\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2745,"cells":{"repo_name":{"kind":"string","value":"aperigault/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py"},"copies":{"kind":"string","value":"24"},"size":{"kind":"string","value":"10519"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Copyright (c) 2017 Zim Kalinowski, \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_sqlserver\nversion_added: \"2.5\"\nshort_description: Manage SQL Server instance\ndescription:\n - Create, update and delete instance of SQL Server.\n\noptions:\n resource_group:\n description:\n - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.\n required: True\n name:\n description:\n - The name of the server.\n required: True\n location:\n description:\n - Resource location.\n admin_username:\n description:\n - Administrator username for the server. Once created it cannot be changed.\n admin_password:\n description:\n - The administrator login password (required for server creation).\n version:\n description:\n - The version of the server. For example C(12.0).\n identity:\n description:\n - The identity type. Set this to C(SystemAssigned) in order to automatically create and assign an Azure Active Directory principal for the resource.\n - Possible values include C(SystemAssigned).\n state:\n description:\n - State of the SQL server. 
Use C(present) to create or update a server and use C(absent) to delete a server.\n default: present\n choices:\n - absent\n - present\n\nextends_documentation_fragment:\n - azure\n - azure_tags\n\nauthor:\n - Zim Kalinowski (@zikalino)\n\n'''\n\nEXAMPLES = '''\n - name: Create (or update) SQL Server\n azure_rm_sqlserver:\n resource_group: myResourceGroup\n name: server_name\n location: westus\n admin_username: mylogin\n admin_password: Testpasswordxyz12!\n'''\n\nRETURN = '''\nid:\n description:\n - Resource ID.\n returned: always\n type: str\n sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645\nversion:\n description:\n - The version of the server.\n returned: always\n type: str\n sample: 12.0\nstate:\n description:\n - The state of the server.\n returned: always\n type: str\n sample: state\nfully_qualified_domain_name:\n description:\n - The fully qualified domain name of the server.\n returned: always\n type: str\n sample: sqlcrudtest-4645.database.windows.net\n'''\n\nimport time\nfrom ansible.module_utils.azure_rm_common import AzureRMModuleBase\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from msrest.polling import LROPoller\n from azure.mgmt.sql import SqlManagementClient\n from msrest.serialization import Model\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nclass Actions:\n NoAction, Create, Update, Delete = range(4)\n\n\nclass AzureRMSqlServer(AzureRMModuleBase):\n \"\"\"Configuration class for an Azure RM SQL Server resource\"\"\"\n\n def __init__(self):\n self.module_arg_spec = dict(\n resource_group=dict(\n type='str',\n required=True\n ),\n name=dict(\n type='str',\n required=True\n ),\n location=dict(\n type='str'\n ),\n admin_username=dict(\n type='str'\n ),\n admin_password=dict(\n type='str',\n no_log=True\n ),\n version=dict(\n type='str'\n ),\n identity=dict(\n type='str'\n ),\n state=dict(\n type='str',\n default='present',\n choices=['present', 'absent']\n )\n )\n\n self.resource_group = None\n self.name = None\n self.parameters = dict()\n self.tags = None\n\n self.results = dict(changed=False)\n self.state = None\n self.to_do = Actions.NoAction\n\n super(AzureRMSqlServer, self).__init__(derived_arg_spec=self.module_arg_spec,\n supports_check_mode=True,\n supports_tags=True)\n\n def exec_module(self, **kwargs):\n \"\"\"Main module execution method\"\"\"\n\n for key in list(self.module_arg_spec.keys()) + ['tags']:\n if hasattr(self, key):\n setattr(self, key, kwargs[key])\n elif kwargs[key] is not None:\n if key == \"location\":\n self.parameters.update({\"location\": kwargs[key]})\n elif key == \"admin_username\":\n self.parameters.update({\"administrator_login\": kwargs[key]})\n elif key == \"admin_password\":\n self.parameters.update({\"administrator_login_password\": kwargs[key]})\n elif key == \"version\":\n self.parameters.update({\"version\": kwargs[key]})\n elif key == \"identity\":\n self.parameters.update({\"identity\": {\"type\": kwargs[key]}})\n\n old_response = None\n response = None\n results = dict()\n\n resource_group = self.get_resource_group(self.resource_group)\n\n if \"location\" not in self.parameters:\n self.parameters[\"location\"] = resource_group.location\n\n old_response = self.get_sqlserver()\n\n if not old_response:\n self.log(\"SQL Server instance doesn't exist\")\n if self.state == 'absent':\n self.log(\"Old instance didn't exist\")\n else:\n self.to_do = Actions.Create\n else:\n self.log(\"SQL Server instance 
already exists\")\n if self.state == 'absent':\n self.to_do = Actions.Delete\n elif self.state == 'present':\n self.log(\"Need to check if SQL Server instance has to be deleted or may be updated\")\n update_tags, newtags = self.update_tags(old_response.get('tags', dict()))\n if update_tags:\n self.tags = newtags\n self.to_do = Actions.Update\n\n if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):\n self.log(\"Need to Create / Update the SQL Server instance\")\n\n if self.check_mode:\n self.results['changed'] = True\n return self.results\n\n self.parameters['tags'] = self.tags\n response = self.create_update_sqlserver()\n response.pop('administrator_login_password', None)\n\n if not old_response:\n self.results['changed'] = True\n else:\n self.results['changed'] = old_response.__ne__(response)\n self.log(\"Creation / Update done\")\n elif self.to_do == Actions.Delete:\n self.log(\"SQL Server instance deleted\")\n self.results['changed'] = True\n\n if self.check_mode:\n return self.results\n\n self.delete_sqlserver()\n # make sure instance is actually deleted, for some Azure resources, instance is hanging around\n # for some time after deletion -- this should be really fixed in Azure\n while self.get_sqlserver():\n time.sleep(20)\n else:\n self.log(\"SQL Server instance unchanged\")\n self.results['changed'] = False\n response = old_response\n\n if response:\n self.results[\"id\"] = response[\"id\"]\n self.results[\"version\"] = response[\"version\"]\n self.results[\"state\"] = response[\"state\"]\n self.results[\"fully_qualified_domain_name\"] = response[\"fully_qualified_domain_name\"]\n\n return self.results\n\n def create_update_sqlserver(self):\n '''\n Creates or updates SQL Server with the specified configuration.\n\n :return: deserialized SQL Server instance state dictionary\n '''\n self.log(\"Creating / Updating the SQL Server instance {0}\".format(self.name))\n\n try:\n response = self.sql_client.servers.create_or_update(self.resource_group,\n self.name,\n self.parameters)\n if isinstance(response, LROPoller):\n response = self.get_poller_result(response)\n\n except CloudError as exc:\n self.log('Error attempting to create the SQL Server instance.')\n self.fail(\"Error creating the SQL Server instance: {0}\".format(str(exc)))\n return response.as_dict()\n\n def delete_sqlserver(self):\n '''\n Deletes specified SQL Server instance in the specified subscription and resource group.\n\n :return: True\n '''\n self.log(\"Deleting the SQL Server instance {0}\".format(self.name))\n try:\n response = self.sql_client.servers.delete(self.resource_group,\n self.name)\n except CloudError as e:\n self.log('Error attempting to delete the SQL Server instance.')\n self.fail(\"Error deleting the SQL Server instance: {0}\".format(str(e)))\n\n return True\n\n def get_sqlserver(self):\n '''\n Gets the properties of the specified SQL Server.\n\n :return: deserialized SQL Server instance state dictionary\n '''\n self.log(\"Checking if the SQL Server instance {0} is present\".format(self.name))\n found = False\n try:\n response = self.sql_client.servers.get(self.resource_group,\n self.name)\n found = True\n self.log(\"Response : {0}\".format(response))\n self.log(\"SQL Server instance : {0} found\".format(response.name))\n except CloudError as e:\n self.log('Did not find the SQL Server instance.')\n if found is True:\n return response.as_dict()\n\n return False\n\n\ndef main():\n \"\"\"Main execution\"\"\"\n AzureRMSqlServer()\n\n\nif __name__ == '__main__':\n 
main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2746,"cells":{"repo_name":{"kind":"string","value":"britcey/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/network/dellos9/dellos9_command.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"7781"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# (c) 2015 Peter Sprygada, \n#\n# Copyright (c) 2016 Dell Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n#\n\nANSIBLE_METADATA = {'status': ['preview'],\n 'supported_by': 'community',\n 'metadata_version': '1.0'}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: dellos9_command\nversion_added: \"2.2\"\nauthor: \"Dhivya P (@dhivyap)\"\nshort_description: Run commands on remote devices running Dell OS9\ndescription:\n - Sends arbitrary commands to a Dell OS9 node and returns the results\n read from the device. This module includes an\n argument that will cause the module to wait for a specific condition\n before returning or timing out if the condition is not met.\n - This module does not support running commands in configuration mode.\n Please use M(dellos9_config) to configure Dell OS9 devices.\nextends_documentation_fragment: dellos9\noptions:\n commands:\n description:\n - List of commands to send to the remote dellos9 device over the\n configured provider. The resulting output from the command\n is returned. If the I(wait_for) argument is provided, the\n module is not returned until the condition is satisfied or\n the number of retries has expired.\n required: true\n wait_for:\n description:\n - List of conditions to evaluate against the output of the\n command. The task will wait for each condition to be true\n before moving forward. If the conditional is not true\n within the configured number of I(retries), the task fails.\n See examples.\n required: false\n default: null\n retries:\n description:\n - Specifies the number of retries a command should be tried\n before it is considered failed. The command is run on the\n target device every retry and evaluated against the\n I(wait_for) conditions.\n required: false\n default: 10\n interval:\n description:\n - Configures the interval in seconds to wait between retries\n of the command. If the command does not pass the specified\n conditions, the interval indicates how long to wait before\n trying the command again.\n required: false\n default: 1\n\nnotes:\n - This module requires Dell OS9 version 9.10.0.1P13 or above.\n\n - This module requires to increase the ssh connection rate limit.\n Use the following command I(ip ssh connection-rate-limit 60)\n to configure the same. 
This can be done via M(dellos9_config) module\n as well.\n\n\"\"\"\n\nEXAMPLES = \"\"\"\n# Note: examples below use the following provider dict to handle\n# transport and authentication to the node.\nvars:\n cli:\n host: \"{{ inventory_hostname }}\"\n username: admin\n password: admin\n transport: cli\n\ntasks:\n - name: run show version on remote devices\n dellos9_command:\n commands: show version\n provider: \"{{ cli }}\"\n\n - name: run show version and check to see if output contains OS9\n dellos9_command:\n commands: show version\n wait_for: result[0] contains OS9\n provider: \"{{ cli }}\"\n\n - name: run multiple commands on remote nodes\n dellos9_command:\n commands:\n - show version\n - show interfaces\n provider: \"{{ cli }}\"\n\n - name: run multiple commands and evaluate the output\n dellos9_command:\n commands:\n - show version\n - show interfaces\n wait_for:\n - result[0] contains OS9\n - result[1] contains Loopback\n provider: \"{{ cli }}\"\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The set of responses from the commands\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: ['...', '...']\nstdout_lines:\n description: The value of stdout split into a list\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: [['...', '...'], ['...'], ['...']]\nfailed_conditions:\n description: The list of conditionals that have failed\n returned: failed\n type: list\n sample: ['...', '...']\nwarnings:\n description: The list of warnings (if any) generated by module based on arguments\n returned: always\n type: list\n sample: ['...', '...']\n\"\"\"\nimport time\n\nfrom ansible.module_utils.dellos9 import run_commands\nfrom ansible.module_utils.dellos9 import dellos9_argument_spec, check_args\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.network_common import ComplexList\nfrom ansible.module_utils.netcli import Conditional\n\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, basestring):\n item = str(item).split('\\n')\n yield item\n\n\ndef parse_commands(module, warnings):\n command = ComplexList(dict(\n command=dict(key=True),\n prompt=dict(),\n answer=dict()\n ), module)\n commands = command(module.params['commands'])\n for index, item in enumerate(commands):\n if module.check_mode and not item['command'].startswith('show'):\n warnings.append(\n 'only show commands are supported when using check mode, not '\n 'executing `%s`' % item['command']\n )\n elif item['command'].startswith('conf'):\n module.fail_json(\n msg='dellos9_command does not support running config mode '\n 'commands. 
Please use dellos9_config instead'\n )\n return commands\n\n\ndef main():\n \"\"\"main entry point for module execution\n \"\"\"\n argument_spec = dict(\n # { command: <str>, prompt: <str>, response: <str> }\n commands=dict(type='list', required=True),\n\n wait_for=dict(type='list', aliases=['waitfor']),\n match=dict(default='all', choices=['all', 'any']),\n\n retries=dict(default=10, type='int'),\n interval=dict(default=1, type='int')\n )\n\n argument_spec.update(dellos9_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n result = {'changed': False}\n\n warnings = list()\n check_args(module, warnings)\n commands = parse_commands(module, warnings)\n result['warnings'] = warnings\n\n wait_for = module.params['wait_for'] or list()\n conditionals = [Conditional(c) for c in wait_for]\n\n retries = module.params['retries']\n interval = module.params['interval']\n match = module.params['match']\n\n while retries > 0:\n responses = run_commands(module, commands)\n\n for item in list(conditionals):\n if item(responses):\n if match == 'any':\n conditionals = list()\n break\n conditionals.remove(item)\n\n if not conditionals:\n break\n\n time.sleep(interval)\n retries -= 1\n\n if conditionals:\n failed_conditions = [item.raw for item in conditionals]\n msg = 'One or more conditional statements have not been satisfied'\n module.fail_json(msg=msg, failed_conditions=failed_conditions)\n\n result = {\n 'changed': False,\n 'stdout': responses,\n 'stdout_lines': list(to_lines(responses))\n }\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2747,"cells":{"repo_name":{"kind":"string","value":"emonty/vhd-util"},"path":{"kind":"string","value":"tools/python/logging/logging-0.4.9.2/test/log_test11.py"},"copies":{"kind":"string","value":"42"},"size":{"kind":"string","value":"2993"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.\n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose and without fee is hereby granted,\n# provided that the above copyright notice appear in all copies and that\n# both that copyright notice and this permission notice appear in\n# supporting documentation, and that the name of Vinay Sajip\n# not be used in advertising or publicity pertaining to distribution\n# of the software without specific, written prior permission.\n# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING\n# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL\n# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR\n# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER\n# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\n# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n#\n# This file is part of the Python logging distribution. See\n# http://www.red-dove.com/python_logging.html\n#\n\"\"\"Test harness for the logging module. Tests BufferingSMTPHandler, an alternative implementation\nof SMTPHandler.\n\nCopyright (C) 2001-2002 Vinay Sajip. 
All Rights Reserved.\n\"\"\"\nimport string, logging, logging.handlers\n\nMAILHOST = 'beta'\nFROM = 'log_test11@red-dove.com'\nTO = ['arkadi_renko']\nSUBJECT = 'Test Logging email from Python logging module (buffering)'\n\nclass BufferingSMTPHandler(logging.handlers.BufferingHandler):\n def __init__(self, mailhost, fromaddr, toaddrs, subject, capacity):\n logging.handlers.BufferingHandler.__init__(self, capacity)\n self.mailhost = mailhost\n self.mailport = None\n self.fromaddr = fromaddr\n self.toaddrs = toaddrs\n self.subject = subject\n self.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)-5s %(message)s\"))\n\n def flush(self):\n if len(self.buffer) > 0:\n try:\n import smtplib\n port = self.mailport\n if not port:\n port = smtplib.SMTP_PORT\n smtp = smtplib.SMTP(self.mailhost, port)\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\n\\r\\n\" % (self.fromaddr, string.join(self.toaddrs, \",\"), self.subject)\n for record in self.buffer:\n s = self.format(record)\n print s\n msg = msg + s + \"\\r\\n\"\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\n smtp.quit()\n except:\n self.handleError(None) # no particular record\n self.buffer = []\n\ndef test():\n logger = logging.getLogger(\"\")\n logger.setLevel(logging.DEBUG)\n logger.addHandler(BufferingSMTPHandler(MAILHOST, FROM, TO, SUBJECT, 10))\n for i in xrange(102):\n logger.info(\"Info index = %d\", i)\n logging.shutdown()\n\nif __name__ == \"__main__\":\n test()"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2748,"cells":{"repo_name":{"kind":"string","value":"kooksee/TIOT"},"path":{"kind":"string","value":"test/project/src/app/proto/protocol/XBeeProtocol.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11235"},"content":{"kind":"string","value":"# encoding=utf-8\r\nimport binascii\r\nimport json\r\nfrom twisted.internet.protocol import Protocol\r\nfrom app.proto.controller.XbeeController import XBeeController\r\n\r\nclass XBeeProtocol(Protocol):\r\n def __init__(self):\r\n self.ip = ''\r\n self.port = ''\r\n\r\n def connectionMade(self):\r\n\r\n\r\n #import socket\r\n #self.transport.socket._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.ip = str(self.transport.client[0])\r\n self.port = str(self.transport.client[1])\r\n self.factory.numProtocols += 1\r\n\r\n print 'conn build From ip:' + self.ip + ' port:' + self.port\r\n print 'current conn num is ' + str(self.factory.numProtocols) + \"\\n\"\r\n\r\n self.divName = self.ip +\":\"+ self.port+\"##\"+self.__class__.__name__\r\n # self.divName = repr(self) + \"##\" + self.__class__.__name__\r\n self.factory.controller.add_client(self.divName, self.transport)\r\n return\r\n\r\n def connectionLost(self, reason):\r\n print 'conn lost reason --> '+str(reason)\r\n self.factory.numProtocols -= 1\r\n\r\n print 'conn lost. 
ip:' + self.ip + ' port:' + self.port\r\n print 'current conn num is ' + str(self.factory.numProtocols) + "\n"\r\n self.factory.controller.del_client(self.divName)\r\n return\r\n\r\n def dataReceived(self, data):\r\n xbeeController = XBeeController()\r\n # print 'recv data from'+self.divName + "\n" + binascii.b2a_hex(data)\r\n print 'recv data from ip:' + self.ip + ' port:' + self.port + ' data:' + "\n" + binascii.b2a_hex(data)\r\n kdiv = self.factory.controller.online_session\r\n for div in kdiv:\r\n if div == self.divName:\r\n print "Device " + div + " is forwarding its data -->"\r\n\r\n\tdata1 = binascii.b2a_hex(data)\r\n \tprint data1\r\n \tdata2 = xbeeController.getPackets(data1).get_import_data()\r\n\r\n for div in kdiv:\r\n\t #print binascii.b2a_hex(data)\r\n # print div.split("##")[-1]," ",self.__class__.__name__\r\n\r\n\t if div.split("##")[-1] == "LightProtocol":\r\n\t\tif data2[0].get("hot") or data2[0].get("smog"):\r\n\t\t\tdata_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 03 00 00 00 00 2a'\r\n\t\t\tdata_hex = str(bytearray.fromhex(data_hex))\r\n\r\n\t\t\tdata_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 03 00 00 00 00 79'\r\n\t\t\tdata_hex1 = str(bytearray.fromhex(data_hex1))\r\n\r\n\t\t \tprint data_hex\r\n\t\t\tkdiv[div].write(data_hex)\r\n\t\t\tkdiv[div].write(data_hex1)\r\n\r\n\r\n\r\n if div.split("##")[-1] == self.__class__.__name__:\r\n\r\n # data = xbeeController.getPackets(\r\n # "7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 66 66 A6 41 02 00 02 00 1F 85 17 42 44").get_import_data()\r\n # print data\r\n str_data = json.dumps(data2)\r\n print str_data\r\n kdiv[div].write(str_data)\r\n print "Forwarded to: " + div\r\n print "\n"\r\n return\r\n\r\n\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9F 41 11 22 33 44 11 22 33 44 26\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9E 41 11 22 33 44 11 22 33 44 A7\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9F 41 11 22 33 44 11 22 33 44 A6\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9E 41 11 22 33 44 11 22 33 44 27\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9F 41 11 22 33 44 11 22 33 44 26\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 9E 41 11 22 33 44 11 22 33 44 27\r\n# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9E 41 11 22 33 44 11 22 33 44 A7\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 9F 41 11 22 33 44 11 22 33 44 A6\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 CB 88 BB 54 DD\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 BC 41 11 22 33 44 11 22 33 44 09\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 80 BE 41 11 22 33 44 11 22 33 44 87\r\n# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83\r\n# 7E 00 13 20 19 01 00 00 02 11 00 03 50 01 0E 00 0E 00 00 00 80 3F 83\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 50 03 01 00 01 00 00 00 B4 41 11 22 33 44 11 22 33 44 11\r\n# 7E 00 1B 20 19 01 00 00 02 11 00 03 
50 03 01 00 01 00 00 00 B7 41 11 22 33 44 11 22 33 44 0E\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 E9 88 BB 54 BF\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AA 42 82\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AA 42 82\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 B8 1E 0A 42 E9\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B2 41 7B\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 AE 47 0A 42 CA\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B2 41 FB\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 8A C1 23\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 07 89 BB 54 A0\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AE 41 FF\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 FA 40 34\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B5 41 F8\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AE 41 FF\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 25 89 BB 54 82\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AC 41 01\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AF 41 7D 5E\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AC 41 01\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 43 89 BB 54 64\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AF 41 FE\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AD 41 80\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 
00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 61 89 BB 54 46\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 66 66 0A 42 F3\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 5C 8F 0A 42 D4\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 7F 89 BB 54 28\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 B0 41 FD\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 AE 41 7F\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 B0 41 7D 5D\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 28 41 05\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A8 C1 85\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 5C 8F 0A 42 D4\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A1 C1 8C\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 A3 C1 8A\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 9D 89 BB 54 0A\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 9D C1 90\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 80 AD 41 00\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n# 7E 00 13 20 08 00 00 00 02 11 00 03 50 01 01 00 01 00 00 00 A5 C1 08\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n# 7E 00 0F 22 4C 4F 49 54 02 04 00 00 60 00 BB 89 BB 54 EC\r\n# 7E 00 1B 20 1B 00 60 03 02 11 00 03 50 01 01 00 01 00 00 00 A8 41 02 00 02 00 14 AE 0A 42 FD\r\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2749,"cells":{"repo_name":{"kind":"string","value":"ruthger/Archipel"},"path":{"kind":"string","value":"ArchipelAgent/archipel-agent-action-scheduler/archipelagentactionscheduler/__init__.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"2236"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# __init__.py\n#\n# Copyright (C) 2010 
Antoine Mercadal \n# Copyright, 2011 - Franck Villaume \n# This file is part of ArchipelProject\n# http://archipelproject.org\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nimport actionscheduler\n\n\ndef make_archipel_plugin(configuration, entity, group):\n \"\"\"\n This function is the plugin factory. It will be called by the object you want\n to be plugged in. It must return a list whit at least on dictionary containing\n a key for the the plugin informations, and a key for the plugin object.\n @type configuration: Config Object\n @param configuration: the general configuration object\n @type entity: L{TNArchipelEntity}\n @param entity: the entity that has load the plugin\n @type group: string\n @param group: the entry point group name in which the plugin has been loaded\n @rtype: array\n @return: array of dictionary containing the plugins informations and objects\n \"\"\"\n return [{\"info\": actionscheduler.TNActionScheduler.plugin_info(),\n \"plugin\": actionscheduler.TNActionScheduler(configuration, entity, group)}]\n\ndef version():\n \"\"\"\n This function can be called runarchipel -v in order to get the version of the\n installed plugin. You only should have to change the egg name.\n @rtype: tupple\n @return: tupple containing the package name and the version\n \"\"\"\n import pkg_resources\n return (__name__, pkg_resources.get_distribution(\"archipel-agent-action-scheduler\").version, [actionscheduler.TNActionScheduler.plugin_info()])"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":2750,"cells":{"repo_name":{"kind":"string","value":"LeandroRoberto/sapl"},"path":{"kind":"string","value":"sapl/relatorios/templates/pdf_pauta_sessao_preparar_pysc.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9023"},"content":{"kind":"string","value":"import os\n\nrequest=context.REQUEST\nresponse=request.RESPONSE\nsession= request.SESSION\n\nif context.REQUEST['data']!='':\n dat_inicio_sessao = context.REQUEST['data']\n pauta = [] # lista contendo a pauta da ordem do dia a ser impressa \n data = context.pysc.data_converter_pysc(dat_inicio_sessao) # converte data para formato yyyy/mm/dd\n codigo = context.REQUEST['cod_sessao_plen']\n\n # seleciona as matérias que compõem a pauta na data escolhida\n for sessao in context.zsql.sessao_plenaria_obter_zsql(dat_inicio_sessao=data, cod_sessao_plen=codigo, ind_excluido=0):\n inf_basicas_dic = {} # dicionário que armazenará as informacoes basicas da sessao plenaria \n # seleciona o tipo da sessao plenaria\n tipo_sessao = context.zsql.tipo_sessao_plenaria_obter_zsql(tip_sessao=sessao.tip_sessao,ind_excluido=0)[0]\n inf_basicas_dic[\"nom_sessao\"] = tipo_sessao.nom_sessao\n inf_basicas_dic[\"num_sessao_plen\"] = sessao.num_sessao_plen\n inf_basicas_dic[\"nom_sessao\"] = tipo_sessao.nom_sessao\n inf_basicas_dic[\"num_legislatura\"] = sessao.num_legislatura\n inf_basicas_dic[\"num_sessao_leg\"] = sessao.num_sessao_leg\n 
inf_basicas_dic[\"dat_inicio_sessao\"] = sessao.dat_inicio_sessao\n inf_basicas_dic[\"hr_inicio_sessao\"] = sessao.hr_inicio_sessao\n inf_basicas_dic[\"dat_fim_sessao\"] = sessao.dat_fim_sessao\n inf_basicas_dic[\"hr_fim_sessao\"] = sessao.hr_fim_sessao\n \n # Lista das matérias do Expediente, incluindo o status da tramitação\n lst_expediente_materia=[]\n for expediente_materia in context.zsql.votacao_expediente_materia_obter_zsql(dat_ordem=data,cod_sessao_plen=codigo,ind_excluido=0):\n \n # seleciona os detalhes de uma matéria\n materia = context.zsql.materia_obter_zsql(cod_materia=expediente_materia.cod_materia)[0]\n\n dic_expediente_materia = {}\n dic_expediente_materia[\"num_ordem\"] = expediente_materia.num_ordem\n dic_expediente_materia[\"id_materia\"] = materia.sgl_tipo_materia+\" - \"+materia.des_tipo_materia+\" No. \"+str(materia.num_ident_basica)+\"/\"+str(materia.ano_ident_basica)\n dic_expediente_materia[\"txt_ementa\"] = materia.txt_ementa\n dic_expediente_materia[\"ordem_observacao\"] = expediente_materia.ordem_observacao\n\n \t dic_expediente_materia[\"des_numeracao\"]=\"\"\n numeracao = context.zsql.numeracao_obter_zsql(cod_materia=expediente_materia.cod_materia)\n if len(numeracao):\n numeracao = numeracao[0]\n dic_expediente_materia[\"des_numeracao\"] = str(numeracao.num_materia)+\"/\"+str(numeracao.ano_materia)\n\n dic_expediente_materia[\"nom_autor\"] = ''\n autoria = context.zsql.autoria_obter_zsql(cod_materia=expediente_materia.cod_materia, ind_primeiro_autor=1) \n if len(autoria) > 0: # se existe autor\n autoria = autoria[0]\n autor = context.zsql.autor_obter_zsql(cod_autor=autoria.cod_autor)\n if len(autor) > 0:\n autor = autor[0]\n \n if autor.des_tipo_autor == \"Parlamentar\":\n parlamentar = context.zsql.parlamentar_obter_zsql(cod_parlamentar=autor.cod_parlamentar)[0] \n dic_expediente_materia[\"nom_autor\"] = parlamentar.nom_parlamentar\n elif autor.des_tipo_autor == \"Comissao\":\n comissao = context.zsql.comissao_obter_zsql(cod_comissao=autor.cod_comissao)[0]\n dic_expediente_materia[\"nom_autor\"] = comissao.nom_comissao\n else:\n dic_expediente_materia[\"nom_autor\"] = autor.nom_autor\n \n dic_expediente_materia[\"des_turno\"]=\"\"\n dic_expediente_materia[\"des_situacao\"] = \"\"\n tramitacao = context.zsql.tramitacao_obter_zsql(cod_materia=expediente_materia.cod_materia, ind_ult_tramitacao=1)\n if len(tramitacao):\n tramitacao = tramitacao[0]\n if tramitacao.sgl_turno != \"\": \n for turno in [(\"P\",\"Primeiro\"), (\"S\",\"Segundo\"), (\"U\",\"Único\"), (\"F\",\"Final\"), (\"L\",\"Suplementar\"), (\"A\",\"Votação Única em Regime de Urgência\"), (\"B\",\"1ª Votação\"), (\"C\",\"2ª e 3ª Votações\")]:\n if tramitacao.sgl_turno == turno[0]:\n dic_expediente_materia[\"des_turno\"] = turno[1]\n\n dic_expediente_materia[\"des_situacao\"] = tramitacao.des_status\n if dic_expediente_materia[\"des_situacao\"]==None:\n dic_expediente_materia[\"des_situacao\"] = \" \"\n lst_expediente_materia.append(dic_expediente_materia)\n\n \n # Lista das matérias da Ordem do Dia, incluindo o status da tramitação\n lst_votacao=[]\n for votacao in context.zsql.votacao_ordem_dia_obter_zsql(dat_ordem=data,cod_sessao_plen=codigo,ind_excluido=0):\n \n # seleciona os detalhes de uma matéria\n materia = context.zsql.materia_obter_zsql(cod_materia=votacao.cod_materia)[0]\n\n dic_votacao = {}\n dic_votacao[\"num_ordem\"] = votacao.num_ordem\n dic_votacao[\"id_materia\"] = materia.sgl_tipo_materia+\" - \"+materia.des_tipo_materia+\" No. 
\"+str(materia.num_ident_basica)+\"/\"+str(materia.ano_ident_basica)\n dic_votacao[\"txt_ementa\"] = materia.txt_ementa\n dic_votacao[\"ordem_observacao\"] = votacao.ordem_observacao\n\n \t dic_votacao[\"des_numeracao\"]=\"\"\n numeracao = context.zsql.numeracao_obter_zsql(cod_materia=votacao.cod_materia)\n if len(numeracao):\n numeracao = numeracao[0]\n dic_votacao[\"des_numeracao\"] = str(numeracao.num_materia)+\"/\"+str(numeracao.ano_materia)\n\n dic_votacao[\"nom_autor\"] = ''\n autoria = context.zsql.autoria_obter_zsql(cod_materia=votacao.cod_materia, ind_primeiro_autor=1) \n if len(autoria) > 0: # se existe autor\n autoria = autoria[0]\n autor = context.zsql.autor_obter_zsql(cod_autor=autoria.cod_autor)\n if len(autor) > 0:\n autor = autor[0]\n \n if autor.des_tipo_autor == \"Parlamentar\":\n parlamentar = context.zsql.parlamentar_obter_zsql(cod_parlamentar=autor.cod_parlamentar)[0] \n dic_votacao[\"nom_autor\"] = parlamentar.nom_parlamentar\n elif autor.des_tipo_autor == \"Comissao\":\n comissao = context.zsql.comissao_obter_zsql(cod_comissao=autor.cod_comissao)[0]\n dic_votacao[\"nom_autor\"] = comissao.nom_comissao\n else:\n dic_votacao[\"nom_autor\"] = autor.nom_autor\n\n dic_votacao[\"des_turno\"]=\"\"\n dic_votacao[\"des_situacao\"] = \"\"\n tramitacao = context.zsql.tramitacao_obter_zsql(cod_materia=votacao.cod_materia, ind_ult_tramitacao=1)\n if len(tramitacao):\n tramitacao = tramitacao[0]\n if tramitacao.sgl_turno != \"\": \n for turno in [(\"P\",\"Primeiro\"), (\"S\",\"Segundo\"), (\"U\",\"Único\"), (\"L\",\"Suplementar\"), (\"A\",\"Votação Única em Regime de Urgência\"), (\"B\",\"1ª Votação\"), (\"C\",\"2ª e 3ª Votações\")]:\n if tramitacao.sgl_turno == turno[0]:\n dic_votacao[\"des_turno\"] = turno[1]\n\n dic_votacao[\"des_situacao\"] = tramitacao.des_status\n if dic_votacao[\"des_situacao\"]==None:\n dic_votacao[\"des_situacao\"] = \" \"\n lst_votacao.append(dic_votacao)\n\n # obtém as propriedades da casa legislativa para montar o cabeçalho e o rodapé da página\n cabecalho={}\n\n # tenta buscar o logotipo da casa LOGO_CASA\n if hasattr(context.sapl_documentos.props_sapl,'logo_casa.gif'):\n imagem = context.sapl_documentos.props_sapl['logo_casa.gif'].absolute_url()\n else:\n imagem = context.imagens.absolute_url() + \"/brasao_transp.gif\"\n \n #Abaixo é gerado o dic do rodapé da página (linha 7)\n casa={}\n aux=context.sapl_documentos.props_sapl.propertyItems()\n for item in aux:\n casa[item[0]]=item[1]\n localidade=context.zsql.localidade_obter_zsql(cod_localidade=casa[\"cod_localidade\"])\n data_emissao= DateTime().strftime(\"%d/%m/%Y\")\n rodape= casa\n rodape['data_emissao']= data_emissao\n\n inf_basicas_dic['nom_camara']= casa['nom_casa']\n REQUEST=context.REQUEST\n for local in context.zsql.localidade_obter_zsql(cod_localidade = casa['cod_localidade']):\n rodape['nom_localidade']= \" \"+local.nom_localidade\n rodape['sgl_uf']= local.sgl_uf\n\n# return lst_votacao\n sessao=session.id\n caminho = context.pdf_pauta_sessao_gerar(rodape, sessao, imagem, inf_basicas_dic, lst_votacao, lst_expediente_materia)\n if caminho=='aviso':\n return response.redirect('mensagem_emitir_proc')\n else:\n response.redirect(caminho)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2751,"cells":{"repo_name":{"kind":"string","value":"dwaynebailey/pootle"},"path":{"kind":"string","value":"pootle/apps/pootle_word/utils.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"2811"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Copyright 
(C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport re\n\nimport Levenshtein\nimport translate\n\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import stemmer, stopwords\n\n\nclass Stopwords(object):\n\n @cached_property\n def words(self):\n ttk_path = translate.__path__[0]\n fpath = (\n os.path.join(ttk_path, \"share\", \"stoplist-en\")\n if \"share\" in os.listdir(ttk_path)\n else os.path.join(ttk_path, \"..\", \"share\", \"stoplist-en\"))\n words = set()\n with open(fpath) as f:\n for line in f.read().split(\"\\n\"):\n if not line:\n continue\n if line[0] in \"<>=@\":\n words.add(line[1:].strip().lower())\n return words\n\n\nclass TextStemmer(object):\n\n def __init__(self, context):\n self.context = context\n\n def split(self, words):\n return re.split(u\"[^\\w'-]+\", words)\n\n @property\n def stopwords(self):\n return stopwords.get().words\n\n @property\n def tokens(self):\n return [\n t.lower()\n for t\n in self.split(self.text)\n if (len(t) > 2\n and t.lower() not in self.stopwords)]\n\n @property\n def text(self):\n return self.context.source_f\n\n @property\n def stemmer(self):\n return stemmer.get()\n\n @property\n def stems(self):\n return self.get_stems(self.tokens)\n\n def get_stems(self, tokens):\n return set(self.stemmer(t) for t in tokens)\n\n\nclass TextComparison(TextStemmer):\n\n @property\n def text(self):\n return self.context\n\n def jaccard_similarity(self, other):\n return (\n len(other.stems.intersection(self.stems))\n / float(len(set(other.stems).union(self.stems))))\n\n def levenshtein_distance(self, other):\n return (\n Levenshtein.distance(self.text, other.text)\n / max(len(self.text), len(other.text)))\n\n def tokens_present(self, other):\n return (\n len(set(self.tokens).intersection(other.tokens))\n / float(len(other.tokens)))\n\n def stems_present(self, other):\n return (\n len(set(self.stems).intersection(other.stems))\n / float(len(other.stems)))\n\n def similarity(self, other):\n other = self.__class__(other)\n return (\n (self.jaccard_similarity(other)\n + self.levenshtein_distance(other)\n + self.tokens_present(other)\n + self.stems_present(other))\n / 4)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2752,"cells":{"repo_name":{"kind":"string","value":"yize/grunt-tps"},"path":{"kind":"string","value":"tasks/lib/python/Lib/python2.7/distutils/command/upload.py"},"copies":{"kind":"string","value":"176"},"size":{"kind":"string","value":"7002"},"content":{"kind":"string","value":"\"\"\"distutils.command.upload\n\nImplements the Distutils 'upload' subcommand (upload package to PyPI).\"\"\"\nimport os\nimport socket\nimport platform\nfrom urllib2 import urlopen, Request, HTTPError\nfrom base64 import standard_b64encode\nimport urlparse\nimport cStringIO as StringIO\nfrom hashlib import md5\n\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.core import PyPIRCCommand\nfrom distutils.spawn import spawn\nfrom distutils import log\n\nclass upload(PyPIRCCommand):\n\n description = \"upload binary package to PyPI\"\n\n user_options = PyPIRCCommand.user_options + [\n ('sign', 's',\n 'sign files to upload using gpg'),\n ('identity=', 'i', 'GPG identity used to sign files'),\n ]\n\n boolean_options = PyPIRCCommand.boolean_options + ['sign']\n\n def initialize_options(self):\n 
PyPIRCCommand.initialize_options(self)\n self.username = ''\n self.password = ''\n self.show_response = 0\n self.sign = False\n self.identity = None\n\n def finalize_options(self):\n PyPIRCCommand.finalize_options(self)\n if self.identity and not self.sign:\n raise DistutilsOptionError(\n \"Must use --sign for --identity to have meaning\"\n )\n config = self._read_pypirc()\n if config != {}:\n self.username = config['username']\n self.password = config['password']\n self.repository = config['repository']\n self.realm = config['realm']\n\n # getting the password from the distribution\n # if previously set by the register command\n if not self.password and self.distribution.password:\n self.password = self.distribution.password\n\n def run(self):\n if not self.distribution.dist_files:\n raise DistutilsOptionError(\"No dist file created in earlier command\")\n for command, pyversion, filename in self.distribution.dist_files:\n self.upload_file(command, pyversion, filename)\n\n def upload_file(self, command, pyversion, filename):\n # Makes sure the repository URL is compliant\n schema, netloc, url, params, query, fragments = \\\n urlparse.urlparse(self.repository)\n if params or query or fragments:\n raise AssertionError(\"Incompatible url %s\" % self.repository)\n\n if schema not in ('http', 'https'):\n raise AssertionError(\"unsupported schema \" + schema)\n\n # Sign if requested\n if self.sign:\n gpg_args = [\"gpg\", \"--detach-sign\", \"-a\", filename]\n if self.identity:\n gpg_args[2:2] = [\"--local-user\", self.identity]\n spawn(gpg_args,\n dry_run=self.dry_run)\n\n # Fill in the data - send all the meta-data in case we need to\n # register a new release\n f = open(filename,'rb')\n try:\n content = f.read()\n finally:\n f.close()\n meta = self.distribution.metadata\n data = {\n # action\n ':action': 'file_upload',\n 'protcol_version': '1',\n\n # identify release\n 'name': meta.get_name(),\n 'version': meta.get_version(),\n\n # file content\n 'content': (os.path.basename(filename),content),\n 'filetype': command,\n 'pyversion': pyversion,\n 'md5_digest': md5(content).hexdigest(),\n\n # additional meta-data\n 'metadata_version' : '1.0',\n 'summary': meta.get_description(),\n 'home_page': meta.get_url(),\n 'author': meta.get_contact(),\n 'author_email': meta.get_contact_email(),\n 'license': meta.get_licence(),\n 'description': meta.get_long_description(),\n 'keywords': meta.get_keywords(),\n 'platform': meta.get_platforms(),\n 'classifiers': meta.get_classifiers(),\n 'download_url': meta.get_download_url(),\n # PEP 314\n 'provides': meta.get_provides(),\n 'requires': meta.get_requires(),\n 'obsoletes': meta.get_obsoletes(),\n }\n comment = ''\n if command == 'bdist_rpm':\n dist, version, id = platform.dist()\n if dist:\n comment = 'built for %s %s' % (dist, version)\n elif command == 'bdist_dumb':\n comment = 'built for %s' % platform.platform(terse=1)\n data['comment'] = comment\n\n if self.sign:\n data['gpg_signature'] = (os.path.basename(filename) + \".asc\",\n open(filename+\".asc\").read())\n\n # set up the authentication\n auth = \"Basic \" + standard_b64encode(self.username + \":\" +\n self.password)\n\n # Build up the MIME payload for the POST data\n boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'\n sep_boundary = '\\n--' + boundary\n end_boundary = sep_boundary + '--'\n body = StringIO.StringIO()\n for key, value in data.items():\n # handle multiple entries for the same name\n if not isinstance(value, list):\n value = [value]\n for value in value:\n if 
isinstance(value, tuple):\n                    fn = ';filename=\"%s\"' % value[0]\n                    value = value[1]\n                else:\n                    fn = \"\"\n\n                body.write(sep_boundary)\n                body.write('\\nContent-Disposition: form-data; name=\"%s\"'%key)\n                body.write(fn)\n                body.write(\"\\n\\n\")\n                body.write(value)\n                if value and value[-1] == '\\r':\n                    body.write('\\n')  # write an extra newline (lurve Macs)\n        body.write(end_boundary)\n        body.write(\"\\n\")\n        body = body.getvalue()\n\n        self.announce(\"Submitting %s to %s\" % (filename, self.repository), log.INFO)\n\n        # build the Request\n        headers = {'Content-type':\n                        'multipart/form-data; boundary=%s' % boundary,\n                   'Content-length': str(len(body)),\n                   'Authorization': auth}\n\n        request = Request(self.repository, data=body,\n                          headers=headers)\n        # send the data\n        try:\n            result = urlopen(request)\n            status = result.getcode()\n            reason = result.msg\n            if self.show_response:\n                msg = '\\n'.join(('-' * 75, result.read(), '-' * 75))\n                self.announce(msg, log.INFO)\n        except socket.error, e:\n            self.announce(str(e), log.ERROR)\n            return\n        except HTTPError, e:\n            status = e.code\n            reason = e.msg\n\n        if status == 200:\n            self.announce('Server response (%s): %s' % (status, reason),\n                          log.INFO)\n        else:\n            self.announce('Upload failed (%s): %s' % (status, reason),\n                          log.ERROR)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2753,"cells":{"repo_name":{"kind":"string","value":"fitermay/intellij-community"},"path":{"kind":"string","value":"python/lib/Lib/_threading_local.py"},"copies":{"kind":"string","value":"91"},"size":{"kind":"string","value":"6946"},"content":{"kind":"string","value":"\"\"\"Thread-local objects.\n\n(Note that this module provides a Python version of the threading.local\n class.  Depending on the version of Python you're using, there may be a\n faster one available.  You should always import the `local` class from\n `threading`.)\n\nThread-local objects support the management of thread-local data.\nIf you have data that you want to be local to a thread, simply create\na thread-local object and use its attributes:\n\n  >>> mydata = local()\n  >>> mydata.number = 42\n  >>> mydata.number\n  42\n\nYou can also access the local-object's dictionary:\n\n  >>> mydata.__dict__\n  {'number': 42}\n  >>> mydata.__dict__.setdefault('widgets', [])\n  []\n  >>> mydata.widgets\n  []\n\nWhat's important about thread-local objects is that their data are\nlocal to a thread. If we access the data in a different thread:\n\n  >>> log = []\n  >>> def f():\n  ...     items = mydata.__dict__.items()\n  ...     items.sort()\n  ...     log.append(items)\n  ...     mydata.number = 11\n  ...     log.append(mydata.number)\n\n  >>> import threading\n  >>> thread = threading.Thread(target=f)\n  >>> thread.start()\n  >>> thread.join()\n  >>> log\n  [[], 11]\n\nwe get different data.  Furthermore, changes made in the other thread\ndon't affect data seen in this thread:\n\n  >>> mydata.number\n  42\n\nOf course, values you get from a local object, including a __dict__\nattribute, are for whatever thread was current at the time the\nattribute was read.  For that reason, you generally don't want to save\nthese values across threads, as they apply only to the thread they\ncame from.\n\nYou can create custom local objects by subclassing the local class:\n\n  >>> class MyLocal(local):\n  ...     number = 2\n  ...     initialized = False\n  ...     def __init__(self, **kw):\n  ...         if self.initialized:\n  ...             raise SystemError('__init__ called too many times')\n  ...         self.initialized = True\n  ...         self.__dict__.update(kw)\n  ...     def squared(self):\n  ...         
return self.number ** 2\n\nThis can be useful to support default values, methods and\ninitialization. Note that if you define an __init__ method, it will be\ncalled each time the local object is used in a separate thread. This\nis necessary to initialize each thread's dictionary.\n\nNow if we create a local object:\n\n >>> mydata = MyLocal(color='red')\n\nNow we have a default number:\n\n >>> mydata.number\n 2\n\nan initial color:\n\n >>> mydata.color\n 'red'\n >>> del mydata.color\n\nAnd a method that operates on the data:\n\n >>> mydata.squared()\n 4\n\nAs before, we can access the data in a separate thread:\n\n >>> log = []\n >>> thread = threading.Thread(target=f)\n >>> thread.start()\n >>> thread.join()\n >>> log\n [[('color', 'red'), ('initialized', True)], 11]\n\nwithout affecting this thread's data:\n\n >>> mydata.number\n 2\n >>> mydata.color\n Traceback (most recent call last):\n ...\n AttributeError: 'MyLocal' object has no attribute 'color'\n\nNote that subclasses can define slots, but they are not thread\nlocal. They are shared across threads:\n\n >>> class MyLocal(local):\n ... __slots__ = 'number'\n\n >>> mydata = MyLocal()\n >>> mydata.number = 42\n >>> mydata.color = 'red'\n\nSo, the separate thread:\n\n >>> thread = threading.Thread(target=f)\n >>> thread.start()\n >>> thread.join()\n\naffects what we see:\n\n >>> mydata.number\n 11\n\n>>> del mydata\n\"\"\"\n\n__all__ = [\"local\"]\n\n# We need to use objects from the threading module, but the threading\n# module may also want to use our `local` class, if support for locals\n# isn't compiled in to the `thread` module. This creates potential problems\n# with circular imports. For that reason, we don't import `threading`\n# until the bottom of this file (a hack sufficient to worm around the\n# potential problems). Note that almost all platforms do have support for\n# locals in the `thread` module, and there is no circular import problem\n# then, so problems introduced by fiddling the order of imports here won't\n# manifest on most boxes.\n\nclass _localbase(object):\n __slots__ = '_local__key', '_local__args', '_local__lock'\n\n def __new__(cls, *args, **kw):\n self = object.__new__(cls)\n key = '_local__key', 'thread.local.' 
+ str(id(self))\n        object.__setattr__(self, '_local__key', key)\n        object.__setattr__(self, '_local__args', (args, kw))\n        object.__setattr__(self, '_local__lock', RLock())\n\n        if (args or kw) and (cls.__init__ is object.__init__):\n            raise TypeError(\"Initialization arguments are not supported\")\n\n        # We need to create the thread dict in anticipation of\n        # __init__ being called, to make sure we don't call it\n        # again ourselves.\n        dict = object.__getattribute__(self, '__dict__')\n        currentThread().__dict__[key] = dict\n\n        return self\n\ndef _patch(self):\n    key = object.__getattribute__(self, '_local__key')\n    d = currentThread().__dict__.get(key)\n    if d is None:\n        d = {}\n        currentThread().__dict__[key] = d\n        object.__setattr__(self, '__dict__', d)\n\n        # we have a new instance dict, so call out __init__ if we have\n        # one\n        cls = type(self)\n        if cls.__init__ is not object.__init__:\n            args, kw = object.__getattribute__(self, '_local__args')\n            cls.__init__(self, *args, **kw)\n    else:\n        object.__setattr__(self, '__dict__', d)\n\nclass local(_localbase):\n\n    def __getattribute__(self, name):\n        lock = object.__getattribute__(self, '_local__lock')\n        lock.acquire()\n        try:\n            _patch(self)\n            return object.__getattribute__(self, name)\n        finally:\n            lock.release()\n\n    def __setattr__(self, name, value):\n        lock = object.__getattribute__(self, '_local__lock')\n        lock.acquire()\n        try:\n            _patch(self)\n            return object.__setattr__(self, name, value)\n        finally:\n            lock.release()\n\n    def __delattr__(self, name):\n        lock = object.__getattribute__(self, '_local__lock')\n        lock.acquire()\n        try:\n            _patch(self)\n            return object.__delattr__(self, name)\n        finally:\n            lock.release()\n\n    def __del__(self):\n        import threading\n\n        key = object.__getattribute__(self, '_local__key')\n\n        try:\n            threads = list(threading.enumerate())\n        except:\n            # If enumerate fails, as it seems to do during\n            # shutdown, we'll skip cleanup under the assumption\n            # that there is nothing to clean up.\n            return\n\n        for thread in threads:\n            try:\n                __dict__ = thread.__dict__\n            except AttributeError:\n                # Thread is dying, rest in peace.\n                continue\n\n            if key in __dict__:\n                try:\n                    del __dict__[key]\n                except KeyError:\n                    pass # didn't have anything in this thread\n\nfrom threading import currentThread, RLock\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2754,"cells":{"repo_name":{"kind":"string","value":"virgree/odoo"},"path":{"kind":"string","value":"addons/l10n_uy/__openerp__.py"},"copies":{"kind":"string","value":"260"},"size":{"kind":"string","value":"1807"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2011 Openerp.uy \n# OpenERP localization project for Uruguay\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\n{\n    'name': 'Uruguay - Chart of Accounts',\n    'version': '0.1',\n    'author': 'Uruguay l10n Team & Guillem Barba',\n    'category': 'Localization/Account Charts',\n    'website': 'https://launchpad.net/openerp-uruguay',\n    'description': \"\"\"\nGeneral Chart of Accounts.\n==========================\n\nProvide Templates for Chart of Accounts, Taxes for Uruguay.\n\n\"\"\",\n    'license': 'AGPL-3',\n    'depends': ['account'],\n    'data': [\n        'account_types.xml',\n        'taxes_code_template.xml',\n        'account_chart_template.xml',\n        'taxes_template.xml',\n        'l10n_uy_wizard.xml',\n    ],\n    'demo': [],\n    'auto_install': False,\n    'installable': True,\n}\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":2755,"cells":{"repo_name":{"kind":"string","value":"simonwydooghe/ansible"},"path":{"kind":"string","value":"test/units/modules/storage/netapp/test_na_ontap_nvme_namespace.py"},"copies":{"kind":"string","value":"48"},"size":{"kind":"string","value":"7361"},"content":{"kind":"string","value":"# (c) 2018, NetApp, Inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\n''' unit tests ONTAP Ansible module: na_ontap_nvme_namespace'''\n\nfrom __future__ import print_function\nimport json\nimport pytest\n\nfrom units.compat import unittest\nfrom units.compat.mock import patch\nfrom ansible.module_utils import basic\nfrom ansible.module_utils._text import to_bytes\nimport ansible.module_utils.netapp as netapp_utils\n\nfrom ansible.modules.storage.netapp.na_ontap_nvme_namespace \\\n    import NetAppONTAPNVMENamespace as my_module\n\nif not netapp_utils.has_netapp_lib():\n    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')\n\n\ndef set_module_args(args):\n    \"\"\"prepare arguments so that they will be picked up during module creation\"\"\"\n    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})\n    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access\n\n\nclass AnsibleExitJson(Exception):\n    \"\"\"Exception class to be raised by module.exit_json and caught by the test case\"\"\"\n    pass\n\n\nclass AnsibleFailJson(Exception):\n    \"\"\"Exception class to be raised by module.fail_json and caught by the test case\"\"\"\n    pass\n\n\ndef exit_json(*args, **kwargs):  # pylint: disable=unused-argument\n    \"\"\"function to patch over exit_json; package return data into an exception\"\"\"\n    if 'changed' not in kwargs:\n        kwargs['changed'] = False\n    raise AnsibleExitJson(kwargs)\n\n\ndef fail_json(*args, **kwargs):  # pylint: disable=unused-argument\n    \"\"\"function to patch over fail_json; package return data into an exception\"\"\"\n    kwargs['failed'] = True\n    raise AnsibleFailJson(kwargs)\n\n\nclass MockONTAPConnection(object):\n    ''' mock server connection to ONTAP host '''\n\n    def __init__(self, kind=None):\n        ''' save arguments '''\n        self.type = kind\n        self.xml_in = None\n        self.xml_out = None\n\n    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument\n        ''' mock invoke_successfully returning xml data '''\n        self.xml_in = xml\n        if self.type == 'namespace':\n            xml = self.build_namespace_info()\n        elif self.type == 'quota_fail':\n            raise netapp_utils.zapi.NaApiError(code='TEST', message=\"This exception is from the unit test\")\n        self.xml_out = xml\n        return xml\n\n    @staticmethod\n    def build_namespace_info():\n        ''' build xml data for namespace-info '''\n        xml = 
netapp_utils.zapi.NaElement('xml')\n data = {'num-records': 2,\n 'attributes-list': [{'nvme-namespace-info': {'path': 'abcd/vol'}},\n {'nvme-namespace-info': {'path': 'xyz/vol'}}]}\n xml.translate_struct(data)\n return xml\n\n\nclass TestMyModule(unittest.TestCase):\n ''' a group of related Unit Tests '''\n\n def setUp(self):\n self.mock_module_helper = patch.multiple(basic.AnsibleModule,\n exit_json=exit_json,\n fail_json=fail_json)\n self.mock_module_helper.start()\n self.addCleanup(self.mock_module_helper.stop)\n self.server = MockONTAPConnection()\n self.onbox = False\n\n def set_default_args(self):\n if self.onbox:\n hostname = '10.193.75.3'\n username = 'admin'\n password = 'netapp1!'\n vserver = 'ansible'\n ostype = 'linux'\n path = 'abcd/vol'\n size = 20\n else:\n hostname = 'hostname'\n username = 'username'\n password = 'password'\n vserver = 'vserver'\n ostype = 'linux'\n path = 'abcd/vol'\n size = 20\n return dict({\n 'hostname': hostname,\n 'username': username,\n 'password': password,\n 'ostype': ostype,\n 'vserver': vserver,\n 'path': path,\n 'size': size\n })\n\n def test_module_fail_when_required_args_missing(self):\n ''' required arguments are reported as errors '''\n with pytest.raises(AnsibleFailJson) as exc:\n set_module_args({})\n my_module()\n print('Info: %s' % exc.value.args[0]['msg'])\n\n def test_ensure_get_called(self):\n ''' test get_namespace() for non-existent namespace'''\n set_module_args(self.set_default_args())\n my_obj = my_module()\n my_obj.server = self.server\n assert my_obj.get_namespace() is None\n\n def test_ensure_get_called_existing(self):\n ''' test get_namespace() for existing namespace'''\n set_module_args(self.set_default_args())\n my_obj = my_module()\n my_obj.server = MockONTAPConnection(kind='namespace')\n assert my_obj.get_namespace()\n\n @patch('ansible.modules.storage.netapp.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.create_namespace')\n def test_successful_create(self, create_namespace):\n ''' creating namespace and testing idempotency '''\n set_module_args(self.set_default_args())\n my_obj = my_module()\n if not self.onbox:\n my_obj.server = self.server\n with pytest.raises(AnsibleExitJson) as exc:\n my_obj.apply()\n assert exc.value.args[0]['changed']\n create_namespace.assert_called_with()\n # to reset na_helper from remembering the previous 'changed' value\n my_obj = my_module()\n if not self.onbox:\n my_obj.server = MockONTAPConnection('namespace')\n with pytest.raises(AnsibleExitJson) as exc:\n my_obj.apply()\n assert not exc.value.args[0]['changed']\n\n @patch('ansible.modules.storage.netapp.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.delete_namespace')\n def test_successful_delete(self, delete_namespace):\n ''' deleting namespace and testing idempotency '''\n data = self.set_default_args()\n data['state'] = 'absent'\n set_module_args(data)\n my_obj = my_module()\n if not self.onbox:\n my_obj.server = MockONTAPConnection('namespace')\n with pytest.raises(AnsibleExitJson) as exc:\n my_obj.apply()\n assert exc.value.args[0]['changed']\n delete_namespace.assert_called_with()\n # to reset na_helper from remembering the previous 'changed' value\n my_obj = my_module()\n if not self.onbox:\n my_obj.server = self.server\n with pytest.raises(AnsibleExitJson) as exc:\n my_obj.apply()\n assert not exc.value.args[0]['changed']\n\n def test_if_all_methods_catch_exception(self):\n module_args = {}\n module_args.update(self.set_default_args())\n set_module_args(module_args)\n my_obj = my_module()\n if not self.onbox:\n my_obj.server 
= MockONTAPConnection('quota_fail')\n with pytest.raises(AnsibleFailJson) as exc:\n my_obj.get_namespace()\n assert 'Error fetching namespace info:' in exc.value.args[0]['msg']\n with pytest.raises(AnsibleFailJson) as exc:\n my_obj.create_namespace()\n assert 'Error creating namespace for path' in exc.value.args[0]['msg']\n with pytest.raises(AnsibleFailJson) as exc:\n my_obj.delete_namespace()\n assert 'Error deleting namespace for path' in exc.value.args[0]['msg']\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2756,"cells":{"repo_name":{"kind":"string","value":"icereval/osf.io"},"path":{"kind":"string","value":"api/requests/serializers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3573"},"content":{"kind":"string","value":"from django.db import IntegrityError\nfrom rest_framework import exceptions\nfrom rest_framework import serializers as ser\n\nfrom api.base.exceptions import Conflict\nfrom api.base.utils import absolute_reverse, get_user_auth\nfrom api.base.serializers import JSONAPISerializer, LinksField, VersionedDateTimeField, RelationshipField\nfrom osf.models import NodeRequest\nfrom osf.utils.workflows import DefaultStates, RequestTypes\n\n\nclass NodeRequestSerializer(JSONAPISerializer):\n class Meta:\n type_ = 'node-requests'\n\n filterable_fields = frozenset([\n 'creator',\n 'request_type',\n 'machine_state',\n 'created',\n 'id'\n ])\n id = ser.CharField(source='_id', read_only=True)\n request_type = ser.ChoiceField(read_only=True, required=False, choices=RequestTypes.choices())\n machine_state = ser.ChoiceField(read_only=True, required=False, choices=DefaultStates.choices())\n comment = ser.CharField(required=False, allow_blank=True, max_length=65535)\n created = VersionedDateTimeField(read_only=True)\n modified = VersionedDateTimeField(read_only=True)\n date_last_transitioned = VersionedDateTimeField(read_only=True)\n\n target = RelationshipField(\n read_only=True,\n related_view='nodes:node-detail',\n related_view_kwargs={'node_id': ''},\n filter_key='target___id',\n )\n\n creator = RelationshipField(\n read_only=True,\n related_view='users:user-detail',\n related_view_kwargs={'user_id': ''},\n filter_key='creator___id',\n )\n\n links = LinksField({\n 'self': 'get_absolute_url',\n 'target': 'get_target_url'\n })\n\n def get_absolute_url(self, obj):\n return absolute_reverse('requests:node-request-detail', kwargs={'request_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})\n\n def get_target_url(self, obj):\n return absolute_reverse('nodes:node-detail', kwargs={'node_id': obj.target._id, 'version': self.context['request'].parser_context['kwargs']['version']})\n\n def create(self, validated_data):\n raise NotImplementedError()\n\nclass NodeRequestCreateSerializer(NodeRequestSerializer):\n request_type = ser.ChoiceField(required=True, choices=RequestTypes.choices())\n\n def create(self, validated_data):\n auth = get_user_auth(self.context['request'])\n if not auth.user:\n raise exceptions.PermissionDenied\n\n try:\n node = self.context['view'].get_node()\n except exceptions.PermissionDenied:\n node = self.context['view'].get_node(check_object_permissions=False)\n if auth.user in node.contributors:\n raise exceptions.PermissionDenied('You cannot request access to a node you contribute to.')\n raise\n\n comment = validated_data.pop('comment', '')\n request_type = validated_data.pop('request_type', None)\n\n if not request_type:\n raise exceptions.ValidationError('You must specify a valid 
request_type.')\n\n try:\n node_request = NodeRequest.objects.create(\n target=node,\n creator=auth.user,\n comment=comment,\n machine_state=DefaultStates.INITIAL.value,\n request_type=request_type\n )\n node_request.save()\n except IntegrityError:\n raise Conflict('Users may not have more than one {} request per node.'.format(request_type))\n node_request.run_submit(auth.user)\n return node_request\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2757,"cells":{"repo_name":{"kind":"string","value":"shsingh/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/database/postgresql/postgresql_ext.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"13576"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: postgresql_ext\nshort_description: Add or remove PostgreSQL extensions from a database\ndescription:\n- Add or remove PostgreSQL extensions from a database.\nversion_added: '1.9'\noptions:\n name:\n description:\n - Name of the extension to add or remove.\n required: true\n type: str\n aliases:\n - ext\n db:\n description:\n - Name of the database to add or remove the extension to/from.\n required: true\n type: str\n aliases:\n - login_db\n schema:\n description:\n - Name of the schema to add the extension to.\n version_added: '2.8'\n type: str\n session_role:\n description:\n - Switch to session_role after connecting.\n - The specified session_role must be a role that the current login_user is a member of.\n - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.\n type: str\n version_added: '2.8'\n state:\n description:\n - The database extension state.\n default: present\n choices: [ absent, present ]\n type: str\n cascade:\n description:\n - Automatically install/remove any extensions that this extension depends on\n that are not already installed/removed (supported since PostgreSQL 9.6).\n type: bool\n default: no\n version_added: '2.8'\n login_unix_socket:\n description:\n - Path to a Unix domain socket for local connections.\n type: str\n version_added: '2.8'\n ssl_mode:\n description:\n - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.\n - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.\n - Default of C(prefer) matches libpq default.\n type: str\n default: prefer\n choices: [ allow, disable, prefer, require, verify-ca, verify-full ]\n version_added: '2.8'\n ca_cert:\n description:\n - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).\n - If the file exists, the server's certificate will be verified to be signed by one of these authorities.\n type: str\n aliases: [ ssl_rootcert ]\n version_added: '2.8'\n version:\n description:\n - Extension version to add or update to. 
Has effect with I(state=present) only.\n    - If not specified, the latest extension version will be created.\n    - It can't downgrade an extension version.\n      When a version downgrade is needed, remove the extension and create a new one with the appropriate version.\n    - Set I(version=latest) to update the extension to the latest available version.\n    type: str\n    version_added: '2.9'\nseealso:\n- name: PostgreSQL extensions\n  description: General information about PostgreSQL extensions.\n  link: https://www.postgresql.org/docs/current/external-extensions.html\n- name: CREATE EXTENSION reference\n  description: Complete reference of the CREATE EXTENSION command documentation.\n  link: https://www.postgresql.org/docs/current/sql-createextension.html\n- name: ALTER EXTENSION reference\n  description: Complete reference of the ALTER EXTENSION command documentation.\n  link: https://www.postgresql.org/docs/current/sql-alterextension.html\n- name: DROP EXTENSION reference\n  description: Complete reference of the DROP EXTENSION command documentation.\n  link: https://www.postgresql.org/docs/current/sql-dropextension.html\nnotes:\n- The default authentication assumes that you are either logging in as\n  or sudo'ing to the C(postgres) account on the host.\n- This module uses I(psycopg2), a Python PostgreSQL database adapter.\n- You must ensure that C(psycopg2) is installed on the host before using this module.\n- If the remote host is the PostgreSQL server (which is the default case),\n  then PostgreSQL must also be installed on the remote host.\n- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),\n  and C(python-psycopg2) packages on the remote host before using this module.\nrequirements: [ psycopg2 ]\nauthor:\n- Daniel Schep (@dschep)\n- Thomas O'Donnell (@andytom)\n- Sandro Santilli (@strk)\n- Andrew Klychkov (@Andersson007)\nextends_documentation_fragment: postgres\n'''\n\nEXAMPLES = r'''\n- name: Adds postgis extension to the database acme in the schema foo\n  postgresql_ext:\n    name: postgis\n    db: acme\n    schema: foo\n\n- name: Removes postgis extension from the database acme\n  postgresql_ext:\n    name: postgis\n    db: acme\n    state: absent\n\n- name: Adds earthdistance extension to the database template1 cascade\n  postgresql_ext:\n    name: earthdistance\n    db: template1\n    cascade: true\n\n# In the example below, if earthdistance extension is installed,\n# it will be removed too because it depends on cube:\n- name: Removes cube extension from the database acme cascade\n  postgresql_ext:\n    name: cube\n    db: acme\n    cascade: yes\n    state: absent\n\n- name: Create extension foo of version 1.2 or update it if it's already created\n  postgresql_ext:\n    db: acme\n    name: foo\n    version: 1.2\n\n- name: Assuming extension foo is created, update it to the latest version\n  postgresql_ext:\n    db: acme\n    name: foo\n    version: latest\n'''\n\nRETURN = r'''\nquery:\n  description: List of executed queries.\n  returned: always\n  type: list\n  sample: [\"DROP EXTENSION \\\"acme\\\"\"]\n\n'''\n\nimport traceback\n\nfrom distutils.version import LooseVersion\n\ntry:\n    from psycopg2.extras import DictCursor\nexcept ImportError:\n    # psycopg2 is checked by connect_to_db()\n    # from ansible.module_utils.postgres\n    pass\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.postgres import (\n    connect_to_db,\n    get_conn_params,\n    postgres_common_argument_spec,\n)\nfrom ansible.module_utils._text import to_native\n\nexecuted_queries = []\n\n\nclass NotSupportedError(Exception):\n    pass\n\n\n# 
===========================================\n# PostgreSQL module specific support methods.\n#\n\ndef ext_exists(cursor, ext):\n query = \"SELECT * FROM pg_extension WHERE extname=%(ext)s\"\n cursor.execute(query, {'ext': ext})\n return cursor.rowcount == 1\n\n\ndef ext_delete(cursor, ext, cascade):\n if ext_exists(cursor, ext):\n query = \"DROP EXTENSION \\\"%s\\\"\" % ext\n if cascade:\n query += \" CASCADE\"\n cursor.execute(query)\n executed_queries.append(query)\n return True\n else:\n return False\n\n\ndef ext_update_version(cursor, ext, version):\n \"\"\"Update extension version.\n\n Return True if success.\n\n Args:\n cursor (cursor) -- cursor object of psycopg2 library\n ext (str) -- extension name\n version (str) -- extension version\n \"\"\"\n if version != 'latest':\n query = (\"ALTER EXTENSION \\\"%s\\\"\" % ext)\n cursor.execute(query + \" UPDATE TO %(ver)s\", {'ver': version})\n executed_queries.append(cursor.mogrify(query + \" UPDATE TO %(ver)s\", {'ver': version}))\n else:\n query = (\"ALTER EXTENSION \\\"%s\\\" UPDATE\" % ext)\n cursor.execute(query)\n executed_queries.append(query)\n return True\n\n\ndef ext_create(cursor, ext, schema, cascade, version):\n query = \"CREATE EXTENSION \\\"%s\\\"\" % ext\n if schema:\n query += \" WITH SCHEMA \\\"%s\\\"\" % schema\n if version:\n query += \" VERSION %(ver)s\"\n if cascade:\n query += \" CASCADE\"\n\n if version:\n cursor.execute(query, {'ver': version})\n executed_queries.append(cursor.mogrify(query, {'ver': version}))\n else:\n cursor.execute(query)\n executed_queries.append(query)\n return True\n\n\ndef ext_get_versions(cursor, ext):\n \"\"\"\n Get the current created extension version and available versions.\n\n Return tuple (current_version, [list of available versions]).\n\n Note: the list of available versions contains only versions\n that higher than the current created version.\n If the extension is not created, this list will contain all\n available versions.\n\n Args:\n cursor (cursor) -- cursor object of psycopg2 library\n ext (str) -- extension name\n \"\"\"\n\n # 1. Get the current extension version:\n query = (\"SELECT extversion FROM pg_catalog.pg_extension \"\n \"WHERE extname = %(ext)s\")\n\n current_version = '0'\n cursor.execute(query, {'ext': ext})\n res = cursor.fetchone()\n if res:\n current_version = res[0]\n\n # 2. 
Get available versions:\n query = (\"SELECT version FROM pg_available_extension_versions \"\n \"WHERE name = %(ext)s\")\n cursor.execute(query, {'ext': ext})\n res = cursor.fetchall()\n\n available_versions = []\n if res:\n # Make the list of available versions:\n for line in res:\n if LooseVersion(line[0]) > LooseVersion(current_version):\n available_versions.append(line['version'])\n\n if current_version == '0':\n current_version = False\n\n return (current_version, available_versions)\n\n# ===========================================\n# Module execution.\n#\n\n\ndef main():\n argument_spec = postgres_common_argument_spec()\n argument_spec.update(\n db=dict(type=\"str\", required=True, aliases=[\"login_db\"]),\n ext=dict(type=\"str\", required=True, aliases=[\"name\"]),\n schema=dict(type=\"str\"),\n state=dict(type=\"str\", default=\"present\", choices=[\"absent\", \"present\"]),\n cascade=dict(type=\"bool\", default=False),\n session_role=dict(type=\"str\"),\n version=dict(type=\"str\"),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n )\n\n ext = module.params[\"ext\"]\n schema = module.params[\"schema\"]\n state = module.params[\"state\"]\n cascade = module.params[\"cascade\"]\n version = module.params[\"version\"]\n changed = False\n\n if version and state == 'absent':\n module.warn(\"Parameter version is ignored when state=absent\")\n\n conn_params = get_conn_params(module, module.params)\n db_connection = connect_to_db(module, conn_params, autocommit=True)\n cursor = db_connection.cursor(cursor_factory=DictCursor)\n\n try:\n # Get extension info and available versions:\n curr_version, available_versions = ext_get_versions(cursor, ext)\n\n if state == \"present\":\n if version == 'latest':\n if available_versions:\n version = available_versions[-1]\n else:\n version = ''\n\n if version:\n # If the specific version is passed and it is not available for update:\n if version not in available_versions:\n if not curr_version:\n module.fail_json(msg=\"Passed version '%s' is not available\" % version)\n\n elif LooseVersion(curr_version) == LooseVersion(version):\n changed = False\n\n else:\n module.fail_json(msg=\"Passed version '%s' is lower than \"\n \"the current created version '%s' or \"\n \"the passed version is not available\" % (version, curr_version))\n\n # If the specific version is passed and it is higher that the current version:\n if curr_version and version:\n if LooseVersion(curr_version) < LooseVersion(version):\n if module.check_mode:\n changed = True\n else:\n changed = ext_update_version(cursor, ext, version)\n\n # If the specific version is passed and it is created now:\n if curr_version == version:\n changed = False\n\n # If the ext doesn't exist and installed:\n elif not curr_version and available_versions:\n if module.check_mode:\n changed = True\n else:\n changed = ext_create(cursor, ext, schema, cascade, version)\n\n # If version is not passed:\n else:\n if not curr_version:\n # If the ext doesn't exist and it's installed:\n if available_versions:\n if module.check_mode:\n changed = True\n else:\n changed = ext_create(cursor, ext, schema, cascade, version)\n\n # If the ext doesn't exist and not installed:\n else:\n module.fail_json(msg=\"Extension %s is not installed\" % ext)\n\n elif state == \"absent\":\n if curr_version:\n if module.check_mode:\n changed = True\n else:\n changed = ext_delete(cursor, ext, cascade)\n else:\n changed = False\n\n except Exception as e:\n db_connection.close()\n 
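The update gate in ext_get_versions() and main() above rests on distutils' LooseVersion ordering. A small self-contained illustration follows; the version strings are made-up examples, not module output:

from distutils.version import LooseVersion

# Components are compared numerically, not lexicographically.
print(LooseVersion('1.10') > LooseVersion('1.9'))   # True
available = ['1.0', '1.2', '2.0']
current = '1.2'
# Mirrors ext_get_versions(): keep only versions newer than the current one.
print([v for v in available if LooseVersion(v) > LooseVersion(current)])  # ['2.0']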
module.fail_json(msg=\"Database query failed: %s\" % to_native(e), exception=traceback.format_exc())\n\n db_connection.close()\n module.exit_json(changed=changed, db=module.params[\"db\"], ext=ext, queries=executed_queries)\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2758,"cells":{"repo_name":{"kind":"string","value":"DayGitH/Python-Challenges"},"path":{"kind":"string","value":"DailyProgrammer/DP20160323B.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5877"},"content":{"kind":"string","value":"\"\"\"\n[2016-03-23] Challenge #259 [Intermediate] Mahjong Hands\n\nhttps://www.reddit.com/r/dailyprogrammer/comments/4bmdwz/20160323_challenge_259_intermediate_mahjong_hands/\n\n# Description\nYou are the biggest, baddest mahjong player around. Your enemies tremble at your presence on the battlefield, and you\ncan barely walk ten steps before a fan begs you for an autograph.\nHowever, you have a dark secret that would ruin you if it ever came to light. You're terrible at determining whether a\nhand is a winning hand. For now, you've been able to bluff and bluster your way, but you know that one day you won't be\nable to get away with it.\nAs such, you've decided to write a program to assist you!\n## Further Details\nMahjong (not to be confused with [mahjong solitaire](http://en.wikipedia.org/wiki/Mahjong_solitaire)) is a game where\nhands are composed from combinations of tiles. There are a number of variants of mahjong, but for this challenge, we\nwill consider a simplified variant of Japanese Mahjong which is also known as Riichi Mahjong.\n## Basic Version\nThere are three suits in this variant, \"Bamboo\", \"Circle\" and \"Character\". Every tile that belongs to these suits has a\nvalue that ranges from 1 - 9.\nTo complete a hand, tiles are organised into groups. If every tile in a hand belongs to a single group (and each tile\ncan only be used once), the hand is a winning hand.\nFor now, we shall consider the groups \"Pair\", \"Set\" and \"Sequence\". They are composed as follows:\nPair - Two tiles with the same suit and value\nSet - Three tiles with the same suit and value\nSequence - Three tiles with the same suit, and which increment in value, such as \"Circle 2, Circle 3, Circle 4\". There\nis no value wrapping so \"Circle 9, Circle 1, Circle 2\" would not be considered valid.\nA hand is composed of 14 tiles.\n## Bonus 1 - Adding Quads\nThere is actually a fourth group called a \"Quad\". It is just like a pair and a set, except it is composed of four tiles.\nWhat makes this group special is that a hand containing quads will actually have a hand larger than 14, 1 for every\nquad. This is fine, as long as there is *1, and only 1 pair*.\n## Bonus 2 - Adding Honour Tiles\nIn addition to the tiles belonging to the three suits, there are 7 additional tiles. These tiles have no value, and are\ncollectively known as \"honour\" tiles.\nAs they have no value, they cannot be members of a sequence. Furthermore, they can only be part of a set or pair with\ntiles that are exactly the same. For example, \"Red Dragon, Red Dragon, Red Dragon\" would be a valid set, but \"Red\nDragon, Green Dragon, Red Dragon\" would not.\nThese additional tiles are:\n* Green Dragon\n* Red Dragon\n* White Dragon\n* North Wind\n* East Wind\n* South Wind\n* West Wind\n## Bonus 3 - Seven Pairs\nThere are a number of special hands that are an exception to the above rules. One such hand is \"Seven Pairs\". 
As the\nname suggests, it is a hand composed of seven pairs.\n# Formal Inputs & Outputs\n## Input description\n### Basic\nYou will be provided with N on a single line, followed by N lines of the following format:\n,\n### Bonus 2\nIn addition, the lines may be of the format:\n\n## Output description\nYou should output whether the hand is a winning hand or not.\n# Sample Inputs and Outputs\n## Sample Input (Standard)\n 14\n Circle,4\n Circle,5\n Circle,6\n Bamboo,1\n Bamboo,2\n Bamboo,3\n Character,2\n Character,2\n Character,2\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n## Sample Output (Standard)\n Winning hand\n## Sample Input (Standard)\n 14\n Circle,4\n Bamboo,1\n Circle,5\n Bamboo,2\n Character,2\n Bamboo,3\n Character,2\n Circle,6\n Character,2\n Circle,1\n Bamboo,8\n Circle,1\n Bamboo,7\n Bamboo,9\n## Sample Output (Standard)\n Winning hand\n## Sample Input (Standard)\n 14\n Circle,4\n Circle,5\n Circle,6\n Circle,4\n Circle,5\n Circle,6\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n Circle,4\n Circle,5\n Circle,6\n## Sample Output (Standard)\n Winning hand\n## Sample Input (Bonus 1)\n 15\n Circle,4\n Circle,5\n Circle,6\n Bamboo,1\n Bamboo,2\n Bamboo,3\n Character,2\n Character,2\n Character,2\n Character,2\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n## Sample Output (Bonus 1)\n Winning hand\n## Sample Input (Bonus 1)\n 16\n Circle,4\n Circle,5\n Circle,6\n Bamboo,1\n Bamboo,2\n Bamboo,3\n Character,2\n Character,2\n Character,2\n Character,2\n Circle,1\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n## Sample Output (Bonus 1)\n Not a winning hand\n## Sample Input (Bonus 2)\n 14\n Circle,4\n Circle,5\n Circle,6\n Bamboo,1\n Bamboo,2\n Bamboo,3\n Red Dragon\n Red Dragon\n Red Dragon\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n## Sample Output (Bonus 2)\n Winning hand\n## Sample Input (Bonus 2)\n 14\n Circle,4\n Circle,5\n Circle,6\n Bamboo,1\n Bamboo,2\n Bamboo,3\n Red Dragon\n Green Dragon\n White Dragon\n Circle,1\n Circle,1\n Bamboo,7\n Bamboo,8\n Bamboo,9\n## Sample Output (Bonus 2)\n Not a winning hand\n## Sample Input (Bonus 3)\n 14\n Circle,4\n Circle,4\n Character,5\n Character,5\n Bamboo,5\n Bamboo,5\n Circle,5\n Circle,5\n Circle,7\n Circle,7\n Circle,9\n Circle,9\n Circle,9\n Circle,9\n## Sample Output (Bonus 3)\n Winning hand\n# Notes\nNone of the bonus components depend on each other, and can be implemented in any order. The test cases do not presume\ncompletion of earlier bonus components. The order is just the recommended implementation order.\nMany thanks to Redditor /u/oketa for this submission to /r/dailyprogrammer_ideas. 
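To make the basic-version rules above concrete, here is a small backtracking sketch of a winning-hand checker (14 tiles, one pair plus four sets/sequences; the quad, honour-tile and seven-pairs bonuses are not handled). It is an illustrative outline, not part of the original challenge post:

def _take(counts, tiles):
    # Return a copy of the tile multiset with the given tiles removed,
    # or None if any of them is unavailable.
    c = dict(counts)
    for t in tiles:
        if c.get(t, 0) == 0:
            return None
        c[t] -= 1
        if c[t] == 0:
            del c[t]
    return c

def _groups_left(counts):
    # True if the remaining tiles decompose into sets and sequences.
    if not counts:
        return True
    suit, value = min(counts)  # the smallest tile must start its own group
    for group in ([(suit, value)] * 3,
                  [(suit, value), (suit, value + 1), (suit, value + 2)]):
        rest = _take(counts, group)
        if rest is not None and _groups_left(rest):
            return True
    return False

def is_winning(hand):
    # hand: list of (suit, value) tuples, e.g. ('Circle', 4).
    if len(hand) != 14:
        return False
    counts = {}
    for t in hand:
        counts[t] = counts.get(t, 0) + 1
    for tile in set(hand):  # try every candidate pair
        rest = _take(counts, [tile, tile])
        if rest is not None and _groups_left(rest):
            return True
    return False

print(is_winning([('Circle', 4), ('Circle', 5), ('Circle', 6),
                  ('Bamboo', 1), ('Bamboo', 2), ('Bamboo', 3),
                  ('Character', 2), ('Character', 2), ('Character', 2),
                  ('Circle', 1), ('Circle', 1),
                  ('Bamboo', 7), ('Bamboo', 8), ('Bamboo', 9)]))  # True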
If you have any ideas, please submit\nthem there!\n\"\"\"\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2759,"cells":{"repo_name":{"kind":"string","value":"waldocarter/p2pool"},"path":{"kind":"string","value":"nattraverso/pynupnp/soap.py"},"copies":{"kind":"string","value":"288"},"size":{"kind":"string","value":"3547"},"content":{"kind":"string","value":"\"\"\"\nThis module is a SOAP client using twisted's deferreds.\nIt uses the SOAPpy package.\n\n@author: Raphael Slinckx\n@copyright: Copyright 2005\n@license: LGPL\n@contact: U{raphael@slinckx.net}\n@version: 0.1.0\n\"\"\"\n\n__revision__ = \"$id\"\n\nimport SOAPpy, logging\nfrom SOAPpy.Config import Config\nfrom twisted.web import client, error\n\n#General config\nConfig.typed = False\n\nclass SoapError(Exception):\n \"\"\"\n This is a SOAP error message, not an HTTP error message.\n \n The content of this error is a SOAPpy structure representing the\n SOAP error message.\n \"\"\"\n pass\n\nclass SoapProxy:\n \"\"\"\n Proxy for an url to which we send SOAP rpc calls.\n \"\"\"\n def __init__(self, url, prefix):\n \"\"\"\n Init the proxy, it will connect to the given url, using the\n given soap namespace.\n \n @param url: The url of the remote host to call\n @param prefix: The namespace prefix to use, eg.\n 'urn:schemas-upnp-org:service:WANIPConnection:1'\n \"\"\"\n logging.debug(\"Soap Proxy: '%s', prefix: '%s'\", url, prefix)\n self._url = url\n self._prefix = prefix\n \n def call(self, method, **kwargs):\n \"\"\"\n Call the given remote method with the given arguments, as keywords.\n \n Returns a deferred, called with SOAPpy structure representing\n the soap response.\n \n @param method: The method name to call, eg. 'GetExternalIP'\n @param kwargs: The parameters of the call, as keywords\n @return: A deferred called with the external ip address of this host\n @rtype: L{twisted.internet.defer.Deferred}\n \"\"\"\n payload = SOAPpy.buildSOAP(method=method, config=Config, namespace=self._prefix, kw=kwargs)\n # Here begins the nasty hack\n payload = payload.replace(\n # Upnp wants s: instead of SOAP-ENV\n 'SOAP-ENV','s').replace(\n # Doesn't seem to like these encoding stuff\n 'xmlns:SOAP-ENC=\"http://schemas.xmlsoap.org/soap/encoding/\"', '').replace(\n 'SOAP-ENC:root=\"1\"', '').replace(\n # And it wants u: instead of ns1 namespace for arguments..\n 'ns1','u')\n \n logging.debug(\"SOAP Payload:\\n%s\", payload)\n \n return client.getPage(self._url, postdata=payload, method=\"POST\",\n headers={'content-type': 'text/xml', 'SOAPACTION': '%s#%s' % (self._prefix, method)}\n ).addCallbacks(self._got_page, self._got_error)\n \n def _got_page(self, result):\n \"\"\"\n The http POST command was successful, we parse the SOAP\n answer, and return it.\n \n @param result: the xml content\n \"\"\"\n parsed = SOAPpy.parseSOAPRPC(result)\n \n logging.debug(\"SOAP Answer:\\n%s\", result)\n logging.debug(\"SOAP Parsed Answer: %r\", parsed)\n \n return parsed\n \n def _got_error(self, res):\n \"\"\"\n The HTTP POST command did not succeed, depending on the error type:\n - it's a SOAP error, we parse it and return a L{SoapError}.\n - it's another type of error (http, other), we raise it as is\n \"\"\"\n logging.debug(\"SOAP Error:\\n%s\", res)\n \n if isinstance(res.value, error.Error):\n try:\n logging.debug(\"SOAP Error content:\\n%s\", res.value.response)\n raise SoapError(SOAPpy.parseSOAPRPC(res.value.response)[\"detail\"])\n except:\n raise\n raise 
Exception(res.value)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2760,"cells":{"repo_name":{"kind":"string","value":"tiagofrepereira2012/tensorflow"},"path":{"kind":"string","value":"tensorflow/python/debug/cli/readline_ui_test.py"},"copies":{"kind":"string","value":"81"},"size":{"kind":"string","value":"5646"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests of the readline-based CLI.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport tempfile\n\nfrom tensorflow.python.debug.cli import debugger_cli_common\nfrom tensorflow.python.debug.cli import readline_ui\nfrom tensorflow.python.debug.cli import ui_factory\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import googletest\n\n\nclass MockReadlineUI(readline_ui.ReadlineUI):\n \"\"\"Test subclass of ReadlineUI that bypasses terminal manipulations.\"\"\"\n\n def __init__(self, on_ui_exit=None, command_sequence=None):\n readline_ui.ReadlineUI.__init__(self, on_ui_exit=on_ui_exit)\n\n self._command_sequence = command_sequence\n self._command_counter = 0\n\n self.observers = {\"screen_outputs\": []}\n\n def _get_user_command(self):\n command = self._command_sequence[self._command_counter]\n self._command_counter += 1\n return command\n\n def _display_output(self, screen_output):\n self.observers[\"screen_outputs\"].append(screen_output)\n\n\nclass CursesTest(test_util.TensorFlowTestCase):\n\n def _babble(self, args, screen_info=None):\n ap = argparse.ArgumentParser(\n description=\"Do babble.\", usage=argparse.SUPPRESS)\n ap.add_argument(\n \"-n\",\n \"--num_times\",\n dest=\"num_times\",\n type=int,\n default=60,\n help=\"How many times to babble\")\n\n parsed = ap.parse_args(args)\n\n lines = [\"bar\"] * parsed.num_times\n return debugger_cli_common.RichTextLines(lines)\n\n def testUIFactoryCreatesReadlineUI(self):\n ui = ui_factory.get_ui(\"readline\")\n self.assertIsInstance(ui, readline_ui.ReadlineUI)\n\n def testUIFactoryRaisesExceptionOnInvalidUIType(self):\n with self.assertRaisesRegexp(ValueError, \"Invalid ui_type: 'foobar'\"):\n ui_factory.get_ui(\"foobar\")\n\n def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):\n with self.assertRaisesRegexp(ValueError, \"Invalid ui_type: 'readline'\"):\n ui_factory.get_ui(\"readline\", available_ui_types=[\"curses\"])\n\n def testRunUIExitImmediately(self):\n \"\"\"Make sure that the UI can exit properly after launch.\"\"\"\n\n ui = MockReadlineUI(command_sequence=[\"exit\"])\n ui.run_ui()\n\n # No screen output should have happened.\n self.assertEqual(0, len(ui.observers[\"screen_outputs\"]))\n\n def testRunUIEmptyCommand(self):\n \"\"\"Issue an empty command then exit.\"\"\"\n\n ui = 
MockReadlineUI(command_sequence=[\"\", \"exit\"])\n ui.run_ui()\n self.assertEqual(1, len(ui.observers[\"screen_outputs\"]))\n\n def testRunUIWithInitCmd(self):\n \"\"\"Run UI with an initial command specified.\"\"\"\n\n ui = MockReadlineUI(command_sequence=[\"exit\"])\n\n ui.register_command_handler(\"babble\", self._babble, \"\")\n ui.run_ui(init_command=\"babble\")\n\n screen_outputs = ui.observers[\"screen_outputs\"]\n self.assertEqual(1, len(screen_outputs))\n self.assertEqual([\"bar\"] * 60, screen_outputs[0].lines)\n\n def testRunUIWithValidUsersCommands(self):\n \"\"\"Run UI with a sequence of valid user commands.\"\"\"\n\n ui = MockReadlineUI(command_sequence=[\"babble -n 3\", \"babble -n 6\", \"exit\"])\n ui.register_command_handler(\"babble\", self._babble, \"\")\n ui.run_ui()\n\n screen_outputs = ui.observers[\"screen_outputs\"]\n self.assertEqual(2, len(screen_outputs))\n self.assertEqual([\"bar\"] * 3, screen_outputs[0].lines)\n self.assertEqual([\"bar\"] * 6, screen_outputs[1].lines)\n\n def testRunUIWithInvalidUsersCommands(self):\n \"\"\"Run UI with an invalid command in the sequence.\"\"\"\n\n ui = MockReadlineUI(command_sequence=[\"babble -n 3\", \"wobble\", \"exit\"])\n ui.register_command_handler(\"babble\", self._babble, \"\")\n ui.run_ui()\n\n screen_outputs = ui.observers[\"screen_outputs\"]\n self.assertEqual(2, len(screen_outputs))\n self.assertEqual([\"bar\"] * 3, screen_outputs[0].lines)\n self.assertEqual([\"ERROR: Invalid command prefix \\\"wobble\\\"\"],\n screen_outputs[1].lines)\n\n def testRunUIWithOnUIExitCallback(self):\n observer = {\"callback_invoked\": False}\n\n def callback_for_test():\n observer[\"callback_invoked\"] = True\n\n ui = MockReadlineUI(on_ui_exit=callback_for_test, command_sequence=[\"exit\"])\n\n self.assertFalse(observer[\"callback_invoked\"])\n ui.run_ui()\n\n self.assertEqual(0, len(ui.observers[\"screen_outputs\"]))\n self.assertTrue(observer[\"callback_invoked\"])\n\n def testIncompleteRedirectWorks(self):\n output_path = tempfile.mktemp()\n\n ui = MockReadlineUI(\n command_sequence=[\"babble -n 2 > %s\" % output_path, \"exit\"])\n\n ui.register_command_handler(\"babble\", self._babble, \"\")\n ui.run_ui()\n\n screen_outputs = ui.observers[\"screen_outputs\"]\n self.assertEqual(1, len(screen_outputs))\n self.assertEqual([\"bar\"] * 2, screen_outputs[0].lines)\n\n with gfile.Open(output_path, \"r\") as f:\n self.assertEqual(\"bar\\nbar\\n\", f.read())\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2761,"cells":{"repo_name":{"kind":"string","value":"RaRe-Technologies/gensim"},"path":{"kind":"string","value":"gensim/test/test_lee.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"4277"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nAutomated test to reproduce the results of Lee et al. (2005)\n\nLee et al. (2005) compares different models for semantic\nsimilarity and verifies the results with similarity judgements from humans.\n\nAs a validation of the gensim implementation we reproduced the results\nof Lee et al. (2005) in this test.\n\nMany thanks to Michael D. Lee (michael.lee@adelaide.edu.au) who provided us\nwith his corpus and similarity data.\n\nIf you need to reference this dataset, please cite:\n\nLee, M., Pincombe, B., & Welsh, M. 
(2005).\nAn empirical evaluation of models of text document similarity.\nProceedings of the 27th Annual Conference of the Cognitive Science Society\n\"\"\"\n\nfrom __future__ import with_statement\n\nimport logging\nimport os.path\nimport unittest\nfrom functools import partial\n\nimport numpy as np\n\nfrom gensim import corpora, models, utils, matutils\nfrom gensim.parsing.preprocessing import preprocess_documents, preprocess_string, DEFAULT_FILTERS\n\n\nbg_corpus = None\ncorpus = None\nhuman_sim_vector = None\n\n\nclass TestLeeTest(unittest.TestCase):\n def setUp(self):\n \"\"\"setup lee test corpora\"\"\"\n global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2\n\n pre_path = os.path.join(os.path.dirname(__file__), 'test_data')\n bg_corpus_file = 'lee_background.cor'\n corpus_file = 'lee.cor'\n sim_file = 'similarities0-1.txt'\n\n # read in the corpora\n latin1 = partial(utils.to_unicode, encoding='latin1')\n with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:\n bg_corpus = preprocess_documents(latin1(line) for line in f)\n with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:\n corpus = preprocess_documents(latin1(line) for line in f)\n with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:\n bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]\n with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:\n corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]\n\n # read the human similarity data\n sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))\n sim_m_size = np.shape(sim_matrix)[0]\n human_sim_vector = sim_matrix[np.triu_indices(sim_m_size, 1)]\n\n def test_corpus(self):\n \"\"\"availability and integrity of corpus\"\"\"\n documents_in_bg_corpus = 300\n documents_in_corpus = 50\n len_sim_vector = 1225\n self.assertEqual(len(bg_corpus), documents_in_bg_corpus)\n self.assertEqual(len(corpus), documents_in_corpus)\n self.assertEqual(len(human_sim_vector), len_sim_vector)\n\n def test_lee(self):\n \"\"\"correlation with human data > 0.6\n (this is the value which was achieved in the original paper)\n \"\"\"\n\n global bg_corpus, corpus\n\n # create a dictionary and corpus (bag of words)\n dictionary = corpora.Dictionary(bg_corpus)\n bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]\n corpus = [dictionary.doc2bow(text) for text in corpus]\n\n # transform the bag of words with log_entropy normalization\n log_ent = models.LogEntropyModel(bg_corpus)\n bg_corpus_ent = log_ent[bg_corpus]\n\n # initialize an LSI transformation from background corpus\n lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)\n # transform small corpus to lsi bow->log_ent->fold-in-lsi\n corpus_lsi = lsi[log_ent[corpus]]\n\n # compute pairwise similarity matrix and extract upper triangular\n res = np.zeros((len(corpus), len(corpus)))\n for i, par1 in enumerate(corpus_lsi):\n for j, par2 in enumerate(corpus_lsi):\n res[i, j] = matutils.cossim(par1, par2)\n flat = res[np.triu_indices(len(corpus), 1)]\n\n cor = np.corrcoef(flat, human_sim_vector)[0, 1]\n logging.info(\"LSI correlation coefficient is %s\", cor)\n self.assertTrue(cor > 0.6)\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)\n 
unittest.main()\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":2762,"cells":{"repo_name":{"kind":"string","value":"vodik/pytest"},"path":{"kind":"string","value":"testing/acceptance_test.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"23849"},"content":{"kind":"string","value":"import sys\n\nimport _pytest._code\nimport py\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR\n\n\nclass TestGeneralUsage:\n def test_config_error(self, testdir):\n testdir.makeconftest(\"\"\"\n def pytest_configure(config):\n import pytest\n raise pytest.UsageError(\"hello\")\n \"\"\")\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret != 0\n result.stderr.fnmatch_lines([\n '*ERROR: hello'\n ])\n\n def test_root_conftest_syntax_error(self, testdir):\n testdir.makepyfile(conftest=\"raise SyntaxError\\n\")\n result = testdir.runpytest()\n result.stderr.fnmatch_lines([\"*raise SyntaxError*\"])\n assert result.ret != 0\n\n def test_early_hook_error_issue38_1(self, testdir):\n testdir.makeconftest(\"\"\"\n def pytest_sessionstart():\n 0 / 0\n \"\"\")\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret != 0\n # tracestyle is native by default for hook failures\n result.stdout.fnmatch_lines([\n '*INTERNALERROR*File*conftest.py*line 2*',\n '*0 / 0*',\n ])\n result = testdir.runpytest(testdir.tmpdir, \"--fulltrace\")\n assert result.ret != 0\n # tracestyle is native by default for hook failures\n result.stdout.fnmatch_lines([\n '*INTERNALERROR*def pytest_sessionstart():*',\n '*INTERNALERROR*0 / 0*',\n ])\n\n def test_early_hook_configure_error_issue38(self, testdir):\n testdir.makeconftest(\"\"\"\n def pytest_configure():\n 0 / 0\n \"\"\")\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret != 0\n # here we get it on stderr\n result.stderr.fnmatch_lines([\n '*INTERNALERROR*File*conftest.py*line 2*',\n '*0 / 0*',\n ])\n\n def test_file_not_found(self, testdir):\n result = testdir.runpytest(\"asd\")\n assert result.ret != 0\n result.stderr.fnmatch_lines([\"ERROR: file not found*asd\"])\n\n def test_file_not_found_unconfigure_issue143(self, testdir):\n testdir.makeconftest(\"\"\"\n def pytest_configure():\n print(\"---configure\")\n def pytest_unconfigure():\n print(\"---unconfigure\")\n \"\"\")\n result = testdir.runpytest(\"-s\", \"asd\")\n assert result.ret == 4 # EXIT_USAGEERROR\n result.stderr.fnmatch_lines([\"ERROR: file not found*asd\"])\n result.stdout.fnmatch_lines([\n \"*---configure\",\n \"*---unconfigure\",\n ])\n\n\n def test_config_preparse_plugin_option(self, testdir):\n testdir.makepyfile(pytest_xyz=\"\"\"\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", dest=\"xyz\", action=\"store\")\n \"\"\")\n testdir.makepyfile(test_one=\"\"\"\n def test_option(pytestconfig):\n assert pytestconfig.option.xyz == \"123\"\n \"\"\")\n result = testdir.runpytest(\"-p\", \"pytest_xyz\", \"--xyz=123\", syspathinsert=True)\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n '*1 passed*',\n ])\n\n def test_assertion_magic(self, testdir):\n p = testdir.makepyfile(\"\"\"\n def test_this():\n x = 0\n assert x\n \"\"\")\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\n \"> assert x\",\n \"E assert 0\",\n ])\n assert result.ret == 1\n\n def test_nested_import_error(self, testdir):\n p = testdir.makepyfile(\"\"\"\n import import_fails\n def test_this():\n assert import_fails.a == 1\n \"\"\")\n testdir.makepyfile(import_fails=\"import does_not_work\")\n result = testdir.runpytest(p)\n 
result.stdout.fnmatch_lines([\n #XXX on jython this fails: \"> import import_fails\",\n \"E ImportError: No module named *does_not_work*\",\n ])\n assert result.ret == 1\n\n def test_not_collectable_arguments(self, testdir):\n p1 = testdir.makepyfile(\"\")\n p2 = testdir.makefile(\".pyc\", \"123\")\n result = testdir.runpytest(p1, p2)\n assert result.ret\n result.stderr.fnmatch_lines([\n \"*ERROR: not found:*%s\" %(p2.basename,)\n ])\n\n def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):\n testdir.makepyfile(\"\")\n testdir.makeconftest(\"import qwerty\")\n result = testdir.runpytest(\"--help\")\n result.stdout.fnmatch_lines(\"\"\"\n *--version*\n *warning*conftest.py*\n \"\"\")\n result = testdir.runpytest()\n result.stderr.fnmatch_lines(\"\"\"\n *ERROR*could not load*conftest.py*\n \"\"\")\n\n\n def test_early_skip(self, testdir):\n testdir.mkdir(\"xyz\")\n testdir.makeconftest(\"\"\"\n import pytest\n def pytest_collect_directory():\n pytest.skip(\"early\")\n \"\"\")\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\n \"*1 skip*\"\n ])\n\n def test_issue88_initial_file_multinodes(self, testdir):\n testdir.makeconftest(\"\"\"\n import pytest\n class MyFile(pytest.File):\n def collect(self):\n return [MyItem(\"hello\", parent=self)]\n def pytest_collect_file(path, parent):\n return MyFile(path, parent)\n class MyItem(pytest.Item):\n pass\n \"\"\")\n p = testdir.makepyfile(\"def test_hello(): pass\")\n result = testdir.runpytest(p, \"--collect-only\")\n result.stdout.fnmatch_lines([\n \"*MyFile*test_issue88*\",\n \"*Module*test_issue88*\",\n ])\n\n def test_issue93_initialnode_importing_capturing(self, testdir):\n testdir.makeconftest(\"\"\"\n import sys\n print (\"should not be seen\")\n sys.stderr.write(\"stder42\\\\n\")\n \"\"\")\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n assert \"should not be seen\" not in result.stdout.str()\n assert \"stderr42\" not in result.stderr.str()\n\n def test_conftest_printing_shows_if_error(self, testdir):\n testdir.makeconftest(\"\"\"\n print (\"should be seen\")\n assert 0\n \"\"\")\n result = testdir.runpytest()\n assert result.ret != 0\n assert \"should be seen\" in result.stdout.str()\n\n @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),\n reason=\"symlink not available on this platform\")\n def test_chdir(self, testdir):\n testdir.tmpdir.join(\"py\").mksymlinkto(py._pydir)\n p = testdir.tmpdir.join(\"main.py\")\n p.write(_pytest._code.Source(\"\"\"\n import sys, os\n sys.path.insert(0, '')\n import py\n print (py.__file__)\n print (py.__path__)\n os.chdir(os.path.dirname(os.getcwd()))\n print (py.log)\n \"\"\"))\n result = testdir.runpython(p)\n assert not result.ret\n\n def test_issue109_sibling_conftests_not_loaded(self, testdir):\n sub1 = testdir.tmpdir.mkdir(\"sub1\")\n sub2 = testdir.tmpdir.mkdir(\"sub2\")\n sub1.join(\"conftest.py\").write(\"assert 0\")\n result = testdir.runpytest(sub2)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n sub2.ensure(\"__init__.py\")\n p = sub2.ensure(\"test_hello.py\")\n result = testdir.runpytest(p)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result = testdir.runpytest(sub1)\n assert result.ret == EXIT_USAGEERROR\n\n def test_directory_skipped(self, testdir):\n testdir.makeconftest(\"\"\"\n import pytest\n def pytest_ignore_collect():\n pytest.skip(\"intentional\")\n \"\"\")\n testdir.makepyfile(\"def test_hello(): pass\")\n result = testdir.runpytest()\n assert result.ret == 
EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\n \"*1 skipped*\"\n ])\n\n def test_multiple_items_per_collector_byid(self, testdir):\n c = testdir.makeconftest(\"\"\"\n import pytest\n class MyItem(pytest.Item):\n def runtest(self):\n pass\n class MyCollector(pytest.File):\n def collect(self):\n return [MyItem(name=\"xyz\", parent=self)]\n def pytest_collect_file(path, parent):\n if path.basename.startswith(\"conftest\"):\n return MyCollector(path, parent)\n \"\"\")\n result = testdir.runpytest(c.basename+\"::\"+\"xyz\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n \"*1 pass*\",\n ])\n\n def test_skip_on_generated_funcarg_id(self, testdir):\n testdir.makeconftest(\"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.addcall({'x': 3}, id='hello-123')\n def pytest_runtest_setup(item):\n print (item.keywords)\n if 'hello-123' in item.keywords:\n pytest.skip(\"hello\")\n assert 0\n \"\"\")\n p = testdir.makepyfile(\"\"\"def test_func(x): pass\"\"\")\n res = testdir.runpytest(p)\n assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 skipped*\"])\n\n def test_direct_addressing_selects(self, testdir):\n p = testdir.makepyfile(\"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.addcall({'i': 1}, id=\"1\")\n metafunc.addcall({'i': 2}, id=\"2\")\n def test_func(i):\n pass\n \"\"\")\n res = testdir.runpytest(p.basename + \"::\" + \"test_func[1]\")\n assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_direct_addressing_notfound(self, testdir):\n p = testdir.makepyfile(\"\"\"\n def test_func():\n pass\n \"\"\")\n res = testdir.runpytest(p.basename + \"::\" + \"test_notfound\")\n assert res.ret\n res.stderr.fnmatch_lines([\"*ERROR*not found*\"])\n\n def test_docstring_on_hookspec(self):\n from _pytest import hookspec\n for name, value in vars(hookspec).items():\n if name.startswith(\"pytest_\"):\n assert value.__doc__, \"no docstring for %s\" % name\n\n def test_initialization_error_issue49(self, testdir):\n testdir.makeconftest(\"\"\"\n def pytest_configure():\n x\n \"\"\")\n result = testdir.runpytest()\n assert result.ret == 3 # internal error\n result.stderr.fnmatch_lines([\n \"INTERNAL*pytest_configure*\",\n \"INTERNAL*x*\",\n ])\n assert 'sessionstarttime' not in result.stderr.str()\n\n @pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a'])\n def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor):\n testdir.makepyfile(test_fun=\"\"\"\n def test_a():\n pass\n def\"\"\")\n result = testdir.runpytest(lookfor)\n result.stdout.fnmatch_lines(['*SyntaxError*'])\n if '::' in lookfor:\n result.stderr.fnmatch_lines([\n '*ERROR*',\n ])\n assert result.ret == 4 # usage error only if item not found\n\n def test_report_all_failed_collections_initargs(self, testdir):\n testdir.makepyfile(test_a=\"def\", test_b=\"def\")\n result = testdir.runpytest(\"test_a.py::a\", \"test_b.py::b\")\n result.stderr.fnmatch_lines([\n \"*ERROR*test_a.py::a*\",\n \"*ERROR*test_b.py::b*\",\n ])\n\n def test_namespace_import_doesnt_confuse_import_hook(self, testdir):\n # Ref #383. 
Python 3.3's namespace package messed with our import hooks\n # Importing a module that didn't exist, even if the ImportError was\n # gracefully handled, would make our test crash.\n testdir.mkdir('not_a_package')\n p = testdir.makepyfile(\"\"\"\n try:\n from not_a_package import doesnt_exist\n except ImportError:\n # We handle the import error gracefully here\n pass\n\n def test_whatever():\n pass\n \"\"\")\n res = testdir.runpytest(p.basename)\n assert res.ret == 0\n\n def test_unknown_option(self, testdir):\n result = testdir.runpytest(\"--qwlkej\")\n result.stderr.fnmatch_lines(\"\"\"\n *unrecognized*\n \"\"\")\n\n def test_getsourcelines_error_issue553(self, testdir, monkeypatch):\n monkeypatch.setattr(\"inspect.getsourcelines\", None)\n p = testdir.makepyfile(\"\"\"\n def raise_error(obj):\n raise IOError('source code not available')\n\n import inspect\n inspect.getsourcelines = raise_error\n\n def test_foo(invalid_fixture):\n pass\n \"\"\")\n res = testdir.runpytest(p)\n res.stdout.fnmatch_lines([\n \"*source code not available*\",\n \"*fixture 'invalid_fixture' not found\",\n ])\n\n def test_plugins_given_as_strings(self, tmpdir, monkeypatch):\n \"\"\"test that str values passed to main() as `plugins` arg\n are interpreted as module names to be imported and registered.\n #855.\n \"\"\"\n with pytest.raises(ImportError) as excinfo:\n pytest.main([str(tmpdir)], plugins=['invalid.module'])\n assert 'invalid' in str(excinfo.value)\n\n p = tmpdir.join('test_test_plugins_given_as_strings.py')\n p.write('def test_foo(): pass')\n mod = py.std.types.ModuleType(\"myplugin\")\n monkeypatch.setitem(sys.modules, 'myplugin', mod)\n assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0\n\n def test_parameterized_with_bytes_regex(self, testdir):\n p = testdir.makepyfile(\"\"\"\n import re\n import pytest\n @pytest.mark.parametrize('r', [re.compile(b'foo')])\n def test_stuff(r):\n pass\n \"\"\"\n )\n res = testdir.runpytest(p)\n res.stdout.fnmatch_lines([\n '*1 passed*'\n ])\n\n\nclass TestInvocationVariants:\n def test_earlyinit(self, testdir):\n p = testdir.makepyfile(\"\"\"\n import pytest\n assert hasattr(pytest, 'mark')\n \"\"\")\n result = testdir.runpython(p)\n assert result.ret == 0\n\n @pytest.mark.xfail(\"sys.platform.startswith('java')\")\n def test_pydoc(self, testdir):\n for name in ('py.test', 'pytest'):\n result = testdir.runpython_c(\"import %s;help(%s)\" % (name, name))\n assert result.ret == 0\n s = result.stdout.str()\n assert 'MarkGenerator' in s\n\n def test_import_star_py_dot_test(self, testdir):\n p = testdir.makepyfile(\"\"\"\n from py.test import *\n #collect\n #cmdline\n #Item\n #assert collect.Item is Item\n #assert collect.Collector is Collector\n main\n skip\n xfail\n \"\"\")\n result = testdir.runpython(p)\n assert result.ret == 0\n\n def test_import_star_pytest(self, testdir):\n p = testdir.makepyfile(\"\"\"\n from pytest import *\n #Item\n #File\n main\n skip\n xfail\n \"\"\")\n result = testdir.runpython(p)\n assert result.ret == 0\n\n def test_double_pytestcmdline(self, testdir):\n p = testdir.makepyfile(run=\"\"\"\n import pytest\n pytest.main()\n pytest.main()\n \"\"\")\n testdir.makepyfile(\"\"\"\n def test_hello():\n pass\n \"\"\")\n result = testdir.runpython(p)\n result.stdout.fnmatch_lines([\n \"*1 passed*\",\n \"*1 passed*\",\n ])\n\n def test_python_minus_m_invocation_ok(self, testdir):\n p1 = testdir.makepyfile(\"def test_hello(): pass\")\n res = testdir.run(py.std.sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 0\n\n def 
test_python_minus_m_invocation_fail(self, testdir):\n p1 = testdir.makepyfile(\"def test_fail(): 0/0\")\n res = testdir.run(py.std.sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 1\n\n def test_python_pytest_package(self, testdir):\n p1 = testdir.makepyfile(\"def test_pass(): pass\")\n res = testdir.run(py.std.sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_equivalence_pytest_pytest(self):\n assert pytest.main == py.test.cmdline.main\n\n def test_invoke_with_string(self, capsys):\n retcode = pytest.main(\"-h\")\n assert not retcode\n out, err = capsys.readouterr()\n assert \"--help\" in out\n pytest.raises(ValueError, lambda: pytest.main(0))\n\n def test_invoke_with_path(self, tmpdir, capsys):\n retcode = pytest.main(tmpdir)\n assert retcode == EXIT_NOTESTSCOLLECTED\n out, err = capsys.readouterr()\n\n def test_invoke_plugin_api(self, testdir, capsys):\n class MyPlugin:\n def pytest_addoption(self, parser):\n parser.addoption(\"--myopt\")\n\n pytest.main([\"-h\"], plugins=[MyPlugin()])\n out, err = capsys.readouterr()\n assert \"--myopt\" in out\n\n def test_pyargs_importerror(self, testdir, monkeypatch):\n monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)\n path = testdir.mkpydir(\"tpkg\")\n path.join(\"test_hello.py\").write('raise ImportError')\n\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_hello\")\n assert result.ret != 0\n # FIXME: It would be more natural to match NOT\n # \"ERROR*file*or*package*not*found*\".\n result.stdout.fnmatch_lines([\n \"*collected 0 items*\"\n ])\n\n def test_cmdline_python_package(self, testdir, monkeypatch):\n monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)\n path = testdir.mkpydir(\"tpkg\")\n path.join(\"test_hello.py\").write(\"def test_hello(): pass\")\n path.join(\"test_world.py\").write(\"def test_world(): pass\")\n result = testdir.runpytest(\"--pyargs\", \"tpkg\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n \"*2 passed*\"\n ])\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_hello\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n \"*1 passed*\"\n ])\n\n def join_pythonpath(what):\n cur = py.std.os.environ.get('PYTHONPATH')\n if cur:\n return str(what) + ':' + cur\n return what\n empty_package = testdir.mkpydir(\"empty_package\")\n monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))\n result = testdir.runpytest(\"--pyargs\", \".\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n \"*2 passed*\"\n ])\n\n monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))\n path.join('test_hello.py').remove()\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_hello\")\n assert result.ret != 0\n result.stderr.fnmatch_lines([\n \"*not*found*test_hello*\",\n ])\n\n def test_cmdline_python_package_not_exists(self, testdir):\n result = testdir.runpytest(\"--pyargs\", \"tpkgwhatv\")\n assert result.ret\n result.stderr.fnmatch_lines([\n \"ERROR*file*or*package*not*found*\",\n ])\n\n @pytest.mark.xfail(reason=\"decide: feature or bug\")\n def test_noclass_discovery_if_not_testcase(self, testdir):\n testpath = testdir.makepyfile(\"\"\"\n import unittest\n class TestHello(object):\n def test_hello(self):\n assert self.attr\n\n class RealTest(unittest.TestCase, TestHello):\n attr = 42\n \"\"\")\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=1)\n\n def test_doctest_id(self, testdir):\n testdir.makefile('.txt', \"\"\"\n >>> x=3\n >>> x\n 4\n \"\"\")\n result = 
testdir.runpytest(\"-rf\")\n lines = result.stdout.str().splitlines()\n for line in lines:\n if line.startswith(\"FAIL \"):\n testid = line[5:].strip()\n break\n result = testdir.runpytest(testid, '-rf')\n result.stdout.fnmatch_lines([\n line,\n \"*1 failed*\",\n ])\n\n def test_core_backward_compatibility(self):\n \"\"\"Test backward compatibility for get_plugin_manager function. See #787.\"\"\"\n import _pytest.config\n assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager\n\n\n def test_has_plugin(self, request):\n \"\"\"Test hasplugin function of the plugin manager (#932).\"\"\"\n assert request.config.pluginmanager.hasplugin('python')\n\n\nclass TestDurations:\n source = \"\"\"\n import time\n frag = 0.002\n def test_something():\n pass\n def test_2():\n time.sleep(frag*5)\n def test_1():\n time.sleep(frag)\n def test_3():\n time.sleep(frag*10)\n \"\"\"\n\n def test_calls(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=10\")\n assert result.ret == 0\n result.stdout.fnmatch_lines_random([\n \"*durations*\",\n \"*call*test_3*\",\n \"*call*test_2*\",\n \"*call*test_1*\",\n ])\n\n def test_calls_show_2(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=2\")\n assert result.ret == 0\n lines = result.stdout.get_lines_after(\"*slowest*durations*\")\n assert \"4 passed\" in lines[2]\n\n def test_calls_showall(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=0\")\n assert result.ret == 0\n for x in \"123\":\n for y in 'call',: #'setup', 'call', 'teardown':\n for line in result.stdout.lines:\n if (\"test_%s\" % x) in line and y in line:\n break\n else:\n raise AssertionError(\"not found %s %s\" % (x,y))\n\n def test_with_deselected(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=2\", \"-k test_1\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\n \"*durations*\",\n \"*call*test_1*\",\n ])\n\n def test_with_failing_collection(self, testdir):\n testdir.makepyfile(self.source)\n testdir.makepyfile(test_collecterror=\"\"\"xyz\"\"\")\n result = testdir.runpytest(\"--durations=2\", \"-k test_1\")\n assert result.ret != 0\n result.stdout.fnmatch_lines([\n \"*durations*\",\n \"*call*test_1*\",\n ])\n\n def test_with_not(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"-k not 1\")\n assert result.ret == 0\n\n\nclass TestDurationWithFixture:\n source = \"\"\"\n import time\n frag = 0.001\n def setup_function(func):\n time.sleep(frag * 3)\n def test_1():\n time.sleep(frag*2)\n def test_2():\n time.sleep(frag)\n \"\"\"\n def test_setup_function(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=10\")\n assert result.ret == 0\n\n result.stdout.fnmatch_lines_random(\"\"\"\n *durations*\n * setup *test_1*\n * call *test_1*\n \"\"\")\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2763,"cells":{"repo_name":{"kind":"string","value":"vasyarv/edx-platform"},"path":{"kind":"string","value":"lms/djangoapps/django_comment_client/management/commands/assign_role.py"},"copies":{"kind":"string","value":"251"},"size":{"kind":"string","value":"1144"},"content":{"kind":"string","value":"from optparse import make_option\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django_comment_common.models import Role\nfrom django.contrib.auth.models import User\n\n\nclass Command(BaseCommand):\n option_list 
= BaseCommand.option_list + (\n make_option('--remove',\n action='store_true',\n dest='remove',\n default=False,\n help='Remove the role instead of adding it'),\n )\n\n args = '<name_or_email> <role> <course_id>'\n help = 'Assign a discussion forum role to a user '\n\n def handle(self, *args, **options):\n if len(args) != 3:\n raise CommandError('Usage is assign_role {0}'.format(self.args))\n\n name_or_email, role, course_id = args\n\n role = Role.objects.get(name=role, course_id=course_id)\n\n if '@' in name_or_email:\n user = User.objects.get(email=name_or_email)\n else:\n user = User.objects.get(username=name_or_email)\n\n if options['remove']:\n user.roles.remove(role)\n else:\n user.roles.add(role)\n\n print 'Success!'\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":2764,"cells":{"repo_name":{"kind":"string","value":"thepaul/uftrace"},"path":{"kind":"string","value":"tests/t217_no_libcall_dump.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1525"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom runtest import TestBase\nimport subprocess as sp\n\nTDIR='xxx'\n\nclass TestCase(TestBase):\n def __init__(self):\n TestBase.__init__(self, 'signal', \"\"\"\nuftrace file header: magic = 4674726163652100\nuftrace file header: version = 4\nuftrace file header: header size = 40\nuftrace file header: endian = 1 (little)\nuftrace file header: class = 2 (64 bit)\nuftrace file header: features = 0x363 (PLTHOOK | TASK_SESSION | SYM_REL_ADDR | MAX_STACK | PERF_EVENT | AUTO_ARGS)\nuftrace file header: info = 0x3bff\n\nreading 73755.dat\n50895.869952000 73755: [entry] main(400787) depth: 0\n50895.869952297 73755: [entry] foo(40071f) depth: 1\n50895.869952533 73755: [exit ] foo(40071f) depth: 1\n50895.869966333 73755: [entry] sighandler(400750) depth: 2\n50895.869966473 73755: [entry] bar(400734) depth: 3\n50895.869966617 73755: [exit ] bar(400734) depth: 3\n50895.869967067 73755: [exit ] sighandler(400750) depth: 2\n50895.869969790 73755: [entry] foo(40071f) depth: 1\n50895.869969907 73755: [exit ] foo(40071f) depth: 1\n50895.869970227 73755: [exit ] main(400787) depth: 0\n\"\"\", sort='dump')\n\n def pre(self):\n record_cmd = '%s record -d %s %s' % (TestBase.uftrace_cmd, TDIR, 't-' + self.name)\n sp.call(record_cmd.split())\n return TestBase.TEST_SUCCESS\n\n def runcmd(self):\n return '%s dump --no-libcall -d %s' % (TestBase.uftrace_cmd, TDIR)\n\n def post(self, ret):\n sp.call(['rm', '-rf', TDIR])\n return ret\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2765,"cells":{"repo_name":{"kind":"string","value":"datakortet/django-cms"},"path":{"kind":"string","value":"cms/plugins/teaser/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1148"},"content":{"kind":"string","value":"from django.core.cache import cache\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom cms.models import CMSPlugin, Page\n\nclass Teaser(CMSPlugin):\n \"\"\"\n A Teaser\n \"\"\"\n title = models.CharField(_(\"title\"), max_length=255)\n image = models.ImageField(_(\"image\"), upload_to=CMSPlugin.get_media_path, blank=True, null=True)\n page_link = models.ForeignKey(\n Page, \n verbose_name=_(\"page\"), \n help_text=_(\"If present image will be clickable\"), \n blank=True, \n null=True, \n limit_choices_to={'publisher_is_draft': True}\n )\n url = models.CharField(_(\"link\"), max_length=255, blank=True, null=True, help_text=_(\"If present image will be clickable.\"))\n description = 
models.TextField(_(\"description\"), blank=True, null=True)\n\n @property\n def _cache_key(self):\n return \"%s_id_%d\" % (self.__class__.__name__, self.id)\n\n def save(self, *args, **kwargs):\n super(Teaser, self).save(*args, **kwargs)\n cache.delete(self._cache_key)\n\n def __unicode__(self):\n return self.title\n \n search_fields = ('description',)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2766,"cells":{"repo_name":{"kind":"string","value":"MalloyPower/parsing-python"},"path":{"kind":"string","value":"front-end/testsuite-python-lib/Python-2.0/Lib/dos-8x3/test_win.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"5449"},"content":{"kind":"string","value":"# Test the windows specific win32reg module.\n# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey\n\nfrom _winreg import *\nimport os, sys\n\ntest_key_name = \"SOFTWARE\\\\Python Registry Test Key - Delete Me\"\n\ntest_data = [\n (\"Int Value\", 45, REG_DWORD),\n (\"String Val\", \"A string value\", REG_SZ,),\n (u\"Unicode Val\", u\"A Unicode value\", REG_SZ,),\n (\"StringExpand\", \"The path is %path%\", REG_EXPAND_SZ),\n (\"UnicodeExpand\", u\"The path is %path%\", REG_EXPAND_SZ),\n (\"Multi-string\", [\"Lots\", \"of\", \"string\", \"values\"], REG_MULTI_SZ),\n (\"Multi-unicode\", [u\"Lots\", u\"of\", u\"unicode\", u\"values\"], REG_MULTI_SZ),\n (\"Multi-mixed\", [u\"Unicode\", u\"and\", \"string\", \"values\"],REG_MULTI_SZ),\n (\"Raw Data\", (\"binary\"+chr(0)+\"data\"), REG_BINARY),\n]\n\ndef WriteTestData(root_key):\n # Set the default value for this key.\n SetValue(root_key, test_key_name, REG_SZ, \"Default value\")\n key = CreateKey(root_key, test_key_name)\n # Create a sub-key\n sub_key = CreateKey(key, \"sub_key\")\n # Give the sub-key some named values\n\n for value_name, value_data, value_type in test_data:\n SetValueEx(sub_key, value_name, 0, value_type, value_data)\n\n # Check we wrote as many items as we thought.\n nkeys, nvalues, since_mod = QueryInfoKey(key)\n assert nkeys==1, \"Not the correct number of sub keys\"\n assert nvalues==1, \"Not the correct number of values\"\n nkeys, nvalues, since_mod = QueryInfoKey(sub_key)\n assert nkeys==0, \"Not the correct number of sub keys\"\n assert nvalues==len(test_data), \"Not the correct number of values\"\n # Close this key this way...\n # (but before we do, copy the key as an integer - this allows\n # us to test that the key really gets closed).\n int_sub_key = int(sub_key)\n CloseKey(sub_key)\n try:\n QueryInfoKey(int_sub_key)\n raise RuntimeError, \"It appears the CloseKey() function does not close the actual key!\"\n except EnvironmentError:\n pass\n # ... 
and close that key that way :-)\n int_key = int(key)\n key.Close()\n try:\n QueryInfoKey(int_key)\n raise RuntimeError, \"It appears the key.Close() function does not close the actual key!\"\n except EnvironmentError:\n pass\n\ndef ReadTestData(root_key):\n # Check we can get default value for this key.\n val = QueryValue(root_key, test_key_name)\n assert val==\"Default value\", \"Registry didn't give back the correct value\"\n\n key = OpenKey(root_key, test_key_name)\n # Read the sub-keys\n sub_key = OpenKey(key, \"sub_key\")\n # Check I can enumerate over the values.\n index = 0\n while 1:\n try:\n data = EnumValue(sub_key, index)\n except EnvironmentError:\n break\n assert data in test_data, \"Didn't read back the correct test data\"\n index = index + 1\n assert index==len(test_data), \"Didn't read the correct number of items\"\n # Check I can directly access each item\n for value_name, value_data, value_type in test_data:\n read_val, read_typ = QueryValueEx(sub_key, value_name)\n assert read_val==value_data and read_typ == value_type, \\\n \"Could not directly read the value\"\n sub_key.Close()\n # Enumerate our main key.\n read_val = EnumKey(key, 0)\n assert read_val == \"sub_key\", \"Read subkey value wrong\"\n try:\n EnumKey(key, 1)\n assert 0, \"Was able to get a second key when I only have one!\"\n except EnvironmentError:\n pass\n\n key.Close()\n\ndef DeleteTestData(root_key):\n key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)\n sub_key = OpenKey(key, \"sub_key\", 0, KEY_ALL_ACCESS)\n # It is not necessary to delete the values before deleting\n # the key (although subkeys must not exist). We delete them\n # manually just to prove we can :-)\n for value_name, value_data, value_type in test_data:\n DeleteValue(sub_key, value_name)\n\n nkeys, nvalues, since_mod = QueryInfoKey(sub_key)\n assert nkeys==0 and nvalues==0, \"subkey not empty before delete\"\n sub_key.Close()\n DeleteKey(key, \"sub_key\")\n\n try:\n # Shouldn't be able to delete it twice!\n DeleteKey(key, \"sub_key\")\n assert 0, \"Deleting the key twice succeeded\"\n except EnvironmentError:\n pass\n key.Close()\n DeleteKey(root_key, test_key_name)\n # Opening should now fail!\n try:\n key = OpenKey(root_key, test_key_name)\n assert 0, \"Could open the non-existent key\"\n except WindowsError: # Use this error name this time\n pass\n\ndef TestAll(root_key):\n WriteTestData(root_key)\n ReadTestData(root_key)\n DeleteTestData(root_key)\n\n# Test on my local machine.\nTestAll(HKEY_CURRENT_USER)\nprint \"Local registry tests worked\"\ntry:\n remote_name = sys.argv[sys.argv.index(\"--remote\")+1]\nexcept (IndexError, ValueError):\n remote_name = None\n\nif remote_name is not None:\n try:\n remote_key = ConnectRegistry(remote_name, HKEY_CURRENT_USER)\n except EnvironmentError, exc:\n print \"Could not connect to the remote machine -\", exc.strerror\n remote_key = None\n if remote_key is not None:\n TestAll(remote_key)\n print \"Remote registry tests worked\"\nelse:\n print \"Remote registry calls can be tested using\",\n print \"'test_winreg.py --remote \\\\\\\\machine_name'\"\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2767,"cells":{"repo_name":{"kind":"string","value":"JshWright/home-assistant"},"path":{"kind":"string","value":"tests/components/switch/test_command_line.py"},"copies":{"kind":"string","value":"25"},"size":{"kind":"string","value":"7031"},"content":{"kind":"string","value":"\"\"\"The tests for the Command line switch platform.\"\"\"\nimport json\nimport os\nimport tempfile\nimport 
unittest\n\nfrom homeassistant.setup import setup_component\nfrom homeassistant.const import STATE_ON, STATE_OFF\nimport homeassistant.components.switch as switch\nimport homeassistant.components.switch.command_line as command_line\n\nfrom tests.common import get_test_home_assistant\n\n\n# pylint: disable=invalid-name\nclass TestCommandSwitch(unittest.TestCase):\n \"\"\"Test the command switch.\"\"\"\n\n def setUp(self):\n \"\"\"Setup things to be run when tests are started.\"\"\"\n self.hass = get_test_home_assistant()\n\n def tearDown(self):\n \"\"\"Stop everything that was started.\"\"\"\n self.hass.stop()\n\n def test_state_none(self):\n \"\"\"Test with none state.\"\"\"\n with tempfile.TemporaryDirectory() as tempdirname:\n path = os.path.join(tempdirname, 'switch_status')\n test_switch = {\n 'command_on': 'echo 1 > {}'.format(path),\n 'command_off': 'echo 0 > {}'.format(path),\n }\n self.assertTrue(setup_component(self.hass, switch.DOMAIN, {\n 'switch': {\n 'platform': 'command_line',\n 'switches': {\n 'test': test_switch\n }\n }\n }))\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n switch.turn_on(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_ON, state.state)\n\n switch.turn_off(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n def test_state_value(self):\n \"\"\"Test with state value.\"\"\"\n with tempfile.TemporaryDirectory() as tempdirname:\n path = os.path.join(tempdirname, 'switch_status')\n test_switch = {\n 'command_state': 'cat {}'.format(path),\n 'command_on': 'echo 1 > {}'.format(path),\n 'command_off': 'echo 0 > {}'.format(path),\n 'value_template': '{{ value==\"1\" }}'\n }\n self.assertTrue(setup_component(self.hass, switch.DOMAIN, {\n 'switch': {\n 'platform': 'command_line',\n 'switches': {\n 'test': test_switch\n }\n }\n }))\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n switch.turn_on(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_ON, state.state)\n\n switch.turn_off(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n def test_state_json_value(self):\n \"\"\"Test with state JSON value.\"\"\"\n with tempfile.TemporaryDirectory() as tempdirname:\n path = os.path.join(tempdirname, 'switch_status')\n oncmd = json.dumps({'status': 'ok'})\n offcmd = json.dumps({'status': 'nope'})\n test_switch = {\n 'command_state': 'cat {}'.format(path),\n 'command_on': 'echo \\'{}\\' > {}'.format(oncmd, path),\n 'command_off': 'echo \\'{}\\' > {}'.format(offcmd, path),\n 'value_template': '{{ value_json.status==\"ok\" }}'\n }\n self.assertTrue(setup_component(self.hass, switch.DOMAIN, {\n 'switch': {\n 'platform': 'command_line',\n 'switches': {\n 'test': test_switch\n }\n }\n }))\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n switch.turn_on(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_ON, state.state)\n\n switch.turn_off(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n def test_state_code(self):\n 
\"\"\"Test with state code.\"\"\"\n with tempfile.TemporaryDirectory() as tempdirname:\n path = os.path.join(tempdirname, 'switch_status')\n test_switch = {\n 'command_state': 'cat {}'.format(path),\n 'command_on': 'echo 1 > {}'.format(path),\n 'command_off': 'echo 0 > {}'.format(path),\n }\n self.assertTrue(setup_component(self.hass, switch.DOMAIN, {\n 'switch': {\n 'platform': 'command_line',\n 'switches': {\n 'test': test_switch\n }\n }\n }))\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_OFF, state.state)\n\n switch.turn_on(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_ON, state.state)\n\n switch.turn_off(self.hass, 'switch.test')\n self.hass.block_till_done()\n\n state = self.hass.states.get('switch.test')\n self.assertEqual(STATE_ON, state.state)\n\n def test_assumed_state_should_be_true_if_command_state_is_none(self):\n \"\"\"Test with state value.\"\"\"\n # args: hass, device_name, friendly_name, command_on, command_off,\n # command_state, value_template\n init_args = [\n self.hass,\n \"test_device_name\",\n \"Test friendly name!\",\n \"echo 'on command'\",\n \"echo 'off command'\",\n None,\n None,\n ]\n\n no_state_device = command_line.CommandSwitch(*init_args)\n self.assertTrue(no_state_device.assumed_state)\n\n # Set state command\n init_args[-2] = 'cat {}'\n\n state_device = command_line.CommandSwitch(*init_args)\n self.assertFalse(state_device.assumed_state)\n\n def test_entity_id_set_correctly(self):\n \"\"\"Test that entity_id is set correctly from object_id.\"\"\"\n init_args = [\n self.hass,\n \"test_device_name\",\n \"Test friendly name!\",\n \"echo 'on command'\",\n \"echo 'off command'\",\n False,\n None,\n ]\n\n test_switch = command_line.CommandSwitch(*init_args)\n self.assertEqual(test_switch.entity_id, 'switch.test_device_name')\n self.assertEqual(test_switch.name, 'Test friendly name!')\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2768,"cells":{"repo_name":{"kind":"string","value":"cloudfoundry/php-buildpack-legacy"},"path":{"kind":"string","value":"builds/runtimes/python-2.7.6/lib/python2.7/rexec.py"},"copies":{"kind":"string","value":"228"},"size":{"kind":"string","value":"20148"},"content":{"kind":"string","value":"\"\"\"Restricted execution facilities.\n\nThe class RExec exports methods r_exec(), r_eval(), r_execfile(), and\nr_import(), which correspond roughly to the built-in operations\nexec, eval(), execfile() and import, but executing the code in an\nenvironment that only exposes those built-in operations that are\ndeemed safe. To this end, a modest collection of 'fake' modules is\ncreated which mimics the standard modules by the same names. It is a\npolicy decision which built-in modules and operations are made\navailable; this module provides a reasonable default, but derived\nclasses can change the policies e.g. by overriding or extending class\nvariables like ok_builtin_modules or methods like make_sys().\n\nXXX To do:\n- r_open should allow writing tmp dir\n- r_exec etc. with explicit globals/locals? (Use rexec(\"exec ... 
in ...\")?)\n\n\"\"\"\nfrom warnings import warnpy3k\nwarnpy3k(\"the rexec module has been removed in Python 3.0\", stacklevel=2)\ndel warnpy3k\n\n\nimport sys\nimport __builtin__\nimport os\nimport ihooks\nimport imp\n\n__all__ = [\"RExec\"]\n\nclass FileBase:\n\n ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',\n 'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',\n '__iter__')\n\n\nclass FileWrapper(FileBase):\n\n # XXX This is just like a Bastion -- should use that!\n\n def __init__(self, f):\n for m in self.ok_file_methods:\n if not hasattr(self, m) and hasattr(f, m):\n setattr(self, m, getattr(f, m))\n\n def close(self):\n self.flush()\n\n\nTEMPLATE = \"\"\"\ndef %s(self, *args):\n return getattr(self.mod, self.name).%s(*args)\n\"\"\"\n\nclass FileDelegate(FileBase):\n\n def __init__(self, mod, name):\n self.mod = mod\n self.name = name\n\n for m in FileBase.ok_file_methods + ('close',):\n exec TEMPLATE % (m, m)\n\n\nclass RHooks(ihooks.Hooks):\n\n def __init__(self, *args):\n # Hacks to support both old and new interfaces:\n # old interface was RHooks(rexec[, verbose])\n # new interface is RHooks([verbose])\n verbose = 0\n rexec = None\n if args and type(args[-1]) == type(0):\n verbose = args[-1]\n args = args[:-1]\n if args and hasattr(args[0], '__class__'):\n rexec = args[0]\n args = args[1:]\n if args:\n raise TypeError, \"too many arguments\"\n ihooks.Hooks.__init__(self, verbose)\n self.rexec = rexec\n\n def set_rexec(self, rexec):\n # Called by RExec instance to complete initialization\n self.rexec = rexec\n\n def get_suffixes(self):\n return self.rexec.get_suffixes()\n\n def is_builtin(self, name):\n return self.rexec.is_builtin(name)\n\n def init_builtin(self, name):\n m = __import__(name)\n return self.rexec.copy_except(m, ())\n\n def init_frozen(self, name): raise SystemError, \"don't use this\"\n def load_source(self, *args): raise SystemError, \"don't use this\"\n def load_compiled(self, *args): raise SystemError, \"don't use this\"\n def load_package(self, *args): raise SystemError, \"don't use this\"\n\n def load_dynamic(self, name, filename, file):\n return self.rexec.load_dynamic(name, filename, file)\n\n def add_module(self, name):\n return self.rexec.add_module(name)\n\n def modules_dict(self):\n return self.rexec.modules\n\n def default_path(self):\n return self.rexec.modules['sys'].path\n\n\n# XXX Backwards compatibility\nRModuleLoader = ihooks.FancyModuleLoader\nRModuleImporter = ihooks.ModuleImporter\n\n\nclass RExec(ihooks._Verbose):\n \"\"\"Basic restricted execution framework.\n\n Code executed in this restricted environment will only have access to\n modules and functions that are deemed safe; you can subclass RExec to\n add or remove capabilities as desired.\n\n The RExec class can prevent code from performing unsafe operations like\n reading or writing disk files, or using TCP/IP sockets. 
However, it does\n not protect against code using extremely large amounts of memory or\n processor time.\n\n \"\"\"\n\n ok_path = tuple(sys.path) # That's a policy decision\n\n ok_builtin_modules = ('audioop', 'array', 'binascii',\n 'cmath', 'errno', 'imageop',\n 'marshal', 'math', 'md5', 'operator',\n 'parser', 'select',\n 'sha', '_sre', 'strop', 'struct', 'time',\n '_weakref')\n\n ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',\n 'stat', 'times', 'uname', 'getpid', 'getppid',\n 'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')\n\n ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',\n 'getrefcount', 'hexversion', 'maxint', 'maxunicode',\n 'platform', 'ps1', 'ps2', 'version', 'version_info')\n\n nok_builtin_names = ('open', 'file', 'reload', '__import__')\n\n ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)\n\n def __init__(self, hooks = None, verbose = 0):\n \"\"\"Returns an instance of the RExec class.\n\n The hooks parameter is an instance of the RHooks class or a subclass\n of it. If it is omitted or None, the default RHooks class is\n instantiated.\n\n Whenever the RExec module searches for a module (even a built-in one)\n or reads a module's code, it doesn't actually go out to the file\n system itself. Rather, it calls methods of an RHooks instance that\n was passed to or created by its constructor. (Actually, the RExec\n object doesn't make these calls --- they are made by a module loader\n object that's part of the RExec object. This allows another level of\n flexibility, which can be useful when changing the mechanics of\n import within the restricted environment.)\n\n By providing an alternate RHooks object, we can control the file\n system accesses made to import a module, without changing the\n actual algorithm that controls the order in which those accesses are\n made. For instance, we could substitute an RHooks object that\n passes all filesystem requests to a file server elsewhere, via some\n RPC mechanism such as ILU. 
Grail's applet loader uses this to support\n importing applets from a URL for a directory.\n\n If the verbose parameter is true, additional debugging output may be\n sent to standard output.\n\n \"\"\"\n\n raise RuntimeError, \"This code is not secure in Python 2.2 and later\"\n\n ihooks._Verbose.__init__(self, verbose)\n # XXX There's a circular reference here:\n self.hooks = hooks or RHooks(verbose)\n self.hooks.set_rexec(self)\n self.modules = {}\n self.ok_dynamic_modules = self.ok_builtin_modules\n list = []\n for mname in self.ok_builtin_modules:\n if mname in sys.builtin_module_names:\n list.append(mname)\n self.ok_builtin_modules = tuple(list)\n self.set_trusted_path()\n self.make_builtin()\n self.make_initial_modules()\n # make_sys must be last because it adds the already created\n # modules to its builtin_module_names\n self.make_sys()\n self.loader = RModuleLoader(self.hooks, verbose)\n self.importer = RModuleImporter(self.loader, verbose)\n\n def set_trusted_path(self):\n # Set the path from which dynamic modules may be loaded.\n # Those dynamic modules must also occur in ok_builtin_modules\n self.trusted_path = filter(os.path.isabs, sys.path)\n\n def load_dynamic(self, name, filename, file):\n if name not in self.ok_dynamic_modules:\n raise ImportError, \"untrusted dynamic module: %s\" % name\n if name in sys.modules:\n src = sys.modules[name]\n else:\n src = imp.load_dynamic(name, filename, file)\n dst = self.copy_except(src, [])\n return dst\n\n def make_initial_modules(self):\n self.make_main()\n self.make_osname()\n\n # Helpers for RHooks\n\n def get_suffixes(self):\n return [item # (suff, mode, type)\n for item in imp.get_suffixes()\n if item[2] in self.ok_file_types]\n\n def is_builtin(self, mname):\n return mname in self.ok_builtin_modules\n\n # The make_* methods create specific built-in modules\n\n def make_builtin(self):\n m = self.copy_except(__builtin__, self.nok_builtin_names)\n m.__import__ = self.r_import\n m.reload = self.r_reload\n m.open = m.file = self.r_open\n\n def make_main(self):\n self.add_module('__main__')\n\n def make_osname(self):\n osname = os.name\n src = __import__(osname)\n dst = self.copy_only(src, self.ok_posix_names)\n dst.environ = e = {}\n for key, value in os.environ.items():\n e[key] = value\n\n def make_sys(self):\n m = self.copy_only(sys, self.ok_sys_names)\n m.modules = self.modules\n m.argv = ['RESTRICTED']\n m.path = map(None, self.ok_path)\n m.exc_info = self.r_exc_info\n m = self.modules['sys']\n l = self.modules.keys() + list(self.ok_builtin_modules)\n l.sort()\n m.builtin_module_names = tuple(l)\n\n # The copy_* methods copy existing modules with some changes\n\n def copy_except(self, src, exceptions):\n dst = self.copy_none(src)\n for name in dir(src):\n setattr(dst, name, getattr(src, name))\n for name in exceptions:\n try:\n delattr(dst, name)\n except AttributeError:\n pass\n return dst\n\n def copy_only(self, src, names):\n dst = self.copy_none(src)\n for name in names:\n try:\n value = getattr(src, name)\n except AttributeError:\n continue\n setattr(dst, name, value)\n return dst\n\n def copy_none(self, src):\n m = self.add_module(src.__name__)\n m.__doc__ = src.__doc__\n return m\n\n # Add a module -- return an existing module or create one\n\n def add_module(self, mname):\n m = self.modules.get(mname)\n if m is None:\n self.modules[mname] = m = self.hooks.new_module(mname)\n m.__builtins__ = self.modules['__builtin__']\n return m\n\n # The r* methods are public interfaces\n\n def r_exec(self, code):\n \"\"\"Execute code 
within a restricted environment.\n\n The code parameter must either be a string containing one or more\n lines of Python code, or a compiled code object, which will be\n executed in the restricted environment's __main__ module.\n\n \"\"\"\n m = self.add_module('__main__')\n exec code in m.__dict__\n\n def r_eval(self, code):\n \"\"\"Evaluate code within a restricted environment.\n\n The code parameter must either be a string containing a Python\n expression, or a compiled code object, which will be evaluated in\n the restricted environment's __main__ module. The value of the\n expression or code object will be returned.\n\n \"\"\"\n m = self.add_module('__main__')\n return eval(code, m.__dict__)\n\n def r_execfile(self, file):\n \"\"\"Execute the Python code in the file in the restricted\n environment's __main__ module.\n\n \"\"\"\n m = self.add_module('__main__')\n execfile(file, m.__dict__)\n\n def r_import(self, mname, globals={}, locals={}, fromlist=[]):\n \"\"\"Import a module, raising an ImportError exception if the module\n is considered unsafe.\n\n This method is implicitly called by code executing in the\n restricted environment. Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n \"\"\"\n return self.importer.import_module(mname, globals, locals, fromlist)\n\n def r_reload(self, m):\n \"\"\"Reload the module object, re-parsing and re-initializing it.\n\n This method is implicitly called by code executing in the\n restricted environment. Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n \"\"\"\n return self.importer.reload(m)\n\n def r_unload(self, m):\n \"\"\"Unload the module.\n\n Removes it from the restricted environment's sys.modules dictionary.\n\n This method is implicitly called by code executing in the\n restricted environment. 
Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n \"\"\"\n return self.importer.unload(m)\n\n # The s_* methods are similar but also swap std{in,out,err}\n\n def make_delegate_files(self):\n s = self.modules['sys']\n self.delegate_stdin = FileDelegate(s, 'stdin')\n self.delegate_stdout = FileDelegate(s, 'stdout')\n self.delegate_stderr = FileDelegate(s, 'stderr')\n self.restricted_stdin = FileWrapper(sys.stdin)\n self.restricted_stdout = FileWrapper(sys.stdout)\n self.restricted_stderr = FileWrapper(sys.stderr)\n\n def set_files(self):\n if not hasattr(self, 'save_stdin'):\n self.save_files()\n if not hasattr(self, 'delegate_stdin'):\n self.make_delegate_files()\n s = self.modules['sys']\n s.stdin = self.restricted_stdin\n s.stdout = self.restricted_stdout\n s.stderr = self.restricted_stderr\n sys.stdin = self.delegate_stdin\n sys.stdout = self.delegate_stdout\n sys.stderr = self.delegate_stderr\n\n def reset_files(self):\n self.restore_files()\n s = self.modules['sys']\n self.restricted_stdin = s.stdin\n self.restricted_stdout = s.stdout\n self.restricted_stderr = s.stderr\n\n\n def save_files(self):\n self.save_stdin = sys.stdin\n self.save_stdout = sys.stdout\n self.save_stderr = sys.stderr\n\n def restore_files(self):\n sys.stdin = self.save_stdin\n sys.stdout = self.save_stdout\n sys.stderr = self.save_stderr\n\n def s_apply(self, func, args=(), kw={}):\n self.save_files()\n try:\n self.set_files()\n r = func(*args, **kw)\n finally:\n self.restore_files()\n return r\n\n def s_exec(self, *args):\n \"\"\"Execute code within a restricted environment.\n\n Similar to the r_exec() method, but the code will be granted access\n to restricted versions of the standard I/O streams sys.stdin,\n sys.stderr, and sys.stdout.\n\n The code parameter must either be a string containing one or more\n lines of Python code, or a compiled code object, which will be\n executed in the restricted environment's __main__ module.\n\n \"\"\"\n return self.s_apply(self.r_exec, args)\n\n def s_eval(self, *args):\n \"\"\"Evaluate code within a restricted environment.\n\n Similar to the r_eval() method, but the code will be granted access\n to restricted versions of the standard I/O streams sys.stdin,\n sys.stderr, and sys.stdout.\n\n The code parameter must either be a string containing a Python\n expression, or a compiled code object, which will be evaluated in\n the restricted environment's __main__ module. The value of the\n expression or code object will be returned.\n\n \"\"\"\n return self.s_apply(self.r_eval, args)\n\n def s_execfile(self, *args):\n \"\"\"Execute the Python code in the file in the restricted\n environment's __main__ module.\n\n Similar to the r_execfile() method, but the code will be granted\n access to restricted versions of the standard I/O streams sys.stdin,\n sys.stderr, and sys.stdout.\n\n \"\"\"\n return self.s_apply(self.r_execfile, args)\n\n def s_import(self, *args):\n \"\"\"Import a module, raising an ImportError exception if the module\n is considered unsafe.\n\n This method is implicitly called by code executing in the\n restricted environment. 
Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n Similar to the r_import() method, but has access to restricted\n versions of the standard I/O streams sys.stdin, sys.stderr, and\n sys.stdout.\n\n \"\"\"\n return self.s_apply(self.r_import, args)\n\n def s_reload(self, *args):\n \"\"\"Reload the module object, re-parsing and re-initializing it.\n\n This method is implicitly called by code executing in the\n restricted environment. Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n Similar to the r_reload() method, but has access to restricted\n versions of the standard I/O streams sys.stdin, sys.stderr, and\n sys.stdout.\n\n \"\"\"\n return self.s_apply(self.r_reload, args)\n\n def s_unload(self, *args):\n \"\"\"Unload the module.\n\n Removes it from the restricted environment's sys.modules dictionary.\n\n This method is implicitly called by code executing in the\n restricted environment. Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n Similar to the r_unload() method, but has access to restricted\n versions of the standard I/O streams sys.stdin, sys.stderr, and\n sys.stdout.\n\n \"\"\"\n return self.s_apply(self.r_unload, args)\n\n # Restricted open(...)\n\n def r_open(self, file, mode='r', buf=-1):\n \"\"\"Method called when open() is called in the restricted environment.\n\n The arguments are identical to those of the open() function, and a\n file object (or a class instance compatible with file objects)\n should be returned. RExec's default behaviour is allow opening\n any file for reading, but forbidding any attempt to write a file.\n\n This method is implicitly called by code executing in the\n restricted environment. 
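For example (an illustrative sketch, not part of the original module), a policy subclass could additionally permit writing under /tmp:\n\n def r_open(self, file, mode='r', buf=-1):\n if mode in ('r', 'rb') or file.startswith('/tmp/'):\n return open(file, mode, buf)\n raise IOError, \"only /tmp is writable in this sandbox\"\n\n 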
Overriding this method in a subclass is\n used to change the policies enforced by a restricted environment.\n\n \"\"\"\n mode = str(mode)\n if mode not in ('r', 'rb'):\n raise IOError, \"can't open files for writing in restricted mode\"\n return open(file, mode, buf)\n\n # Restricted version of sys.exc_info()\n\n def r_exc_info(self):\n ty, va, tr = sys.exc_info()\n tr = None\n return ty, va, tr\n\n\ndef test():\n import getopt, traceback\n opts, args = getopt.getopt(sys.argv[1:], 'vt:')\n verbose = 0\n trusted = []\n for o, a in opts:\n if o == '-v':\n verbose = verbose+1\n if o == '-t':\n trusted.append(a)\n r = RExec(verbose=verbose)\n if trusted:\n r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)\n if args:\n r.modules['sys'].argv = args\n r.modules['sys'].path.insert(0, os.path.dirname(args[0]))\n else:\n r.modules['sys'].path.insert(0, \"\")\n fp = sys.stdin\n if args and args[0] != '-':\n try:\n fp = open(args[0])\n except IOError, msg:\n print \"%s: can't open file %r\" % (sys.argv[0], args[0])\n return 1\n if fp.isatty():\n try:\n import readline\n except ImportError:\n pass\n import code\n class RestrictedConsole(code.InteractiveConsole):\n def runcode(self, co):\n self.locals['__builtins__'] = r.modules['__builtin__']\n r.s_apply(code.InteractiveConsole.runcode, (self, co))\n try:\n RestrictedConsole(r.modules['__main__'].__dict__).interact()\n except SystemExit, n:\n return n\n else:\n text = fp.read()\n fp.close()\n c = compile(text, fp.name, 'exec')\n try:\n r.s_exec(c)\n except SystemExit, n:\n return n\n except:\n traceback.print_exc()\n return 1\n\n\nif __name__ == '__main__':\n sys.exit(test())\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2769,"cells":{"repo_name":{"kind":"string","value":"c0hen/django-venv"},"path":{"kind":"string","value":"lib/python3.4/site-packages/django/contrib/admin/helpers.py"},"copies":{"kind":"string","value":"27"},"size":{"kind":"string","value":"15048"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nimport json\nimport warnings\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin.utils import (\n display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,\n lookup_field,\n)\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.fields.related import ManyToManyRel\nfrom django.forms.utils import flatatt\nfrom django.template.defaultfilters import capfirst, linebreaksbr\nfrom django.utils import six\nfrom django.utils.deprecation import RemovedInDjango20Warning\nfrom django.utils.encoding import force_text, smart_text\nfrom django.utils.html import conditional_escape, format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext, ugettext_lazy as _\n\nACTION_CHECKBOX_NAME = '_selected_action'\n\n\nclass ActionForm(forms.Form):\n action = forms.ChoiceField(label=_('Action:'))\n select_across = forms.BooleanField(\n label='',\n required=False,\n initial=0,\n widget=forms.HiddenInput({'class': 'select-across'}),\n )\n\ncheckbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)\n\n\nclass AdminForm(object):\n def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):\n self.form, self.fieldsets = form, fieldsets\n self.prepopulated_fields = [{\n 'field': form[field_name],\n 'dependencies': [form[f] for f in dependencies]\n } for field_name, dependencies in prepopulated_fields.items()]\n self.model_admin = 
model_admin\n if readonly_fields is None:\n readonly_fields = ()\n self.readonly_fields = readonly_fields\n\n def __iter__(self):\n for name, options in self.fieldsets:\n yield Fieldset(\n self.form, name,\n readonly_fields=self.readonly_fields,\n model_admin=self.model_admin,\n **options\n )\n\n def _media(self):\n media = self.form.media\n for fs in self:\n media = media + fs.media\n return media\n media = property(_media)\n\n\nclass Fieldset(object):\n def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),\n description=None, model_admin=None):\n self.form = form\n self.name, self.fields = name, fields\n self.classes = ' '.join(classes)\n self.description = description\n self.model_admin = model_admin\n self.readonly_fields = readonly_fields\n\n def _media(self):\n if 'collapse' in self.classes:\n extra = '' if settings.DEBUG else '.min'\n js = [\n 'vendor/jquery/jquery%s.js' % extra,\n 'jquery.init.js',\n 'collapse%s.js' % extra,\n ]\n return forms.Media(js=['admin/js/%s' % url for url in js])\n return forms.Media()\n media = property(_media)\n\n def __iter__(self):\n for field in self.fields:\n yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)\n\n\nclass Fieldline(object):\n def __init__(self, form, field, readonly_fields=None, model_admin=None):\n self.form = form # A django.forms.Form instance\n if not hasattr(field, \"__iter__\") or isinstance(field, six.text_type):\n self.fields = [field]\n else:\n self.fields = field\n self.has_visible_field = not all(\n field in self.form.fields and self.form.fields[field].widget.is_hidden\n for field in self.fields\n )\n self.model_admin = model_admin\n if readonly_fields is None:\n readonly_fields = ()\n self.readonly_fields = readonly_fields\n\n def __iter__(self):\n for i, field in enumerate(self.fields):\n if field in self.readonly_fields:\n yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin)\n else:\n yield AdminField(self.form, field, is_first=(i == 0))\n\n def errors(self):\n return mark_safe(\n '\\n'.join(\n self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields\n ).strip('\\n')\n )\n\n\nclass AdminField(object):\n def __init__(self, form, field, is_first):\n self.field = form[field] # A django.forms.BoundField instance\n self.is_first = is_first # Whether this field is first on the line\n self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)\n self.is_readonly = False\n\n def label_tag(self):\n classes = []\n contents = conditional_escape(force_text(self.field.label))\n if self.is_checkbox:\n classes.append('vCheckboxLabel')\n\n if self.field.field.required:\n classes.append('required')\n if not self.is_first:\n classes.append('inline')\n attrs = {'class': ' '.join(classes)} if classes else {}\n # checkboxes should not have a label suffix as the checkbox appears\n # to the left of the label.\n return self.field.label_tag(\n contents=mark_safe(contents), attrs=attrs,\n label_suffix='' if self.is_checkbox else None,\n )\n\n def errors(self):\n return mark_safe(self.field.errors.as_ul())\n\n\nclass AdminReadonlyField(object):\n def __init__(self, form, field, is_first, model_admin=None):\n # Make self.field look a little bit like a field. 
This means that\n # {{ field.name }} must be a useful class name to identify the field.\n # For convenience, store other field-related data here too.\n if callable(field):\n class_name = field.__name__ if field.__name__ != '' else ''\n else:\n class_name = field\n\n if form._meta.labels and class_name in form._meta.labels:\n label = form._meta.labels[class_name]\n else:\n label = label_for_field(field, form._meta.model, model_admin)\n\n if form._meta.help_texts and class_name in form._meta.help_texts:\n help_text = form._meta.help_texts[class_name]\n else:\n help_text = help_text_for_field(class_name, form._meta.model)\n\n self.field = {\n 'name': class_name,\n 'label': label,\n 'help_text': help_text,\n 'field': field,\n }\n self.form = form\n self.model_admin = model_admin\n self.is_first = is_first\n self.is_checkbox = False\n self.is_readonly = True\n self.empty_value_display = model_admin.get_empty_value_display()\n\n def label_tag(self):\n attrs = {}\n if not self.is_first:\n attrs[\"class\"] = \"inline\"\n label = self.field['label']\n return format_html('{}:',\n flatatt(attrs),\n capfirst(force_text(label)))\n\n def contents(self):\n from django.contrib.admin.templatetags.admin_list import _boolean_icon\n field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin\n try:\n f, attr, value = lookup_field(field, obj, model_admin)\n except (AttributeError, ValueError, ObjectDoesNotExist):\n result_repr = self.empty_value_display\n else:\n if f is None:\n boolean = getattr(attr, \"boolean\", False)\n if boolean:\n result_repr = _boolean_icon(value)\n else:\n if hasattr(value, \"__html__\"):\n result_repr = value\n else:\n result_repr = smart_text(value)\n if getattr(attr, \"allow_tags\", False):\n warnings.warn(\n \"Deprecated allow_tags attribute used on %s. 
\"\n \"Use django.utils.html.format_html(), format_html_join(), \"\n \"or django.utils.safestring.mark_safe() instead.\" % attr,\n RemovedInDjango20Warning\n )\n result_repr = mark_safe(value)\n else:\n result_repr = linebreaksbr(result_repr)\n else:\n if isinstance(f.remote_field, ManyToManyRel) and value is not None:\n result_repr = \", \".join(map(six.text_type, value.all()))\n else:\n result_repr = display_for_field(value, f, self.empty_value_display)\n result_repr = linebreaksbr(result_repr)\n return conditional_escape(result_repr)\n\n\nclass InlineAdminFormSet(object):\n \"\"\"\n A wrapper around an inline formset for use in the admin system.\n \"\"\"\n def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,\n readonly_fields=None, model_admin=None):\n self.opts = inline\n self.formset = formset\n self.fieldsets = fieldsets\n self.model_admin = model_admin\n if readonly_fields is None:\n readonly_fields = ()\n self.readonly_fields = readonly_fields\n if prepopulated_fields is None:\n prepopulated_fields = {}\n self.prepopulated_fields = prepopulated_fields\n self.classes = ' '.join(inline.classes) if inline.classes else ''\n\n def __iter__(self):\n for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):\n view_on_site_url = self.opts.get_view_on_site_url(original)\n yield InlineAdminForm(\n self.formset, form, self.fieldsets, self.prepopulated_fields,\n original, self.readonly_fields, model_admin=self.opts,\n view_on_site_url=view_on_site_url,\n )\n for form in self.formset.extra_forms:\n yield InlineAdminForm(\n self.formset, form, self.fieldsets, self.prepopulated_fields,\n None, self.readonly_fields, model_admin=self.opts,\n )\n yield InlineAdminForm(\n self.formset, self.formset.empty_form,\n self.fieldsets, self.prepopulated_fields, None,\n self.readonly_fields, model_admin=self.opts,\n )\n\n def fields(self):\n fk = getattr(self.formset, \"fk\", None)\n for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):\n if fk and fk.name == field_name:\n continue\n if field_name in self.readonly_fields:\n yield {\n 'label': label_for_field(field_name, self.opts.model, self.opts),\n 'widget': {'is_hidden': False},\n 'required': False,\n 'help_text': help_text_for_field(field_name, self.opts.model),\n }\n else:\n form_field = self.formset.empty_form.fields[field_name]\n label = form_field.label\n if label is None:\n label = label_for_field(field_name, self.opts.model, self.opts)\n yield {\n 'label': label,\n 'widget': form_field.widget,\n 'required': form_field.required,\n 'help_text': form_field.help_text,\n }\n\n def inline_formset_data(self):\n verbose_name = self.opts.verbose_name\n return json.dumps({\n 'name': '#%s' % self.formset.prefix,\n 'options': {\n 'prefix': self.formset.prefix,\n 'addText': ugettext('Add another %(verbose_name)s') % {\n 'verbose_name': capfirst(verbose_name),\n },\n 'deleteText': ugettext('Remove'),\n }\n })\n\n def _media(self):\n media = self.opts.media + self.formset.media\n for fs in self:\n media = media + fs.media\n return media\n media = property(_media)\n\n\nclass InlineAdminForm(AdminForm):\n \"\"\"\n A wrapper around an inline form for use in the admin system.\n \"\"\"\n def __init__(self, formset, form, fieldsets, prepopulated_fields, original,\n readonly_fields=None, model_admin=None, view_on_site_url=None):\n self.formset = formset\n self.model_admin = model_admin\n self.original = original\n self.show_url = original and view_on_site_url is not None\n self.absolute_url = view_on_site_url\n 
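# The remaining set-up (fieldsets, prepopulated fields and readonly\n # fields) is delegated to AdminForm.__init__ below.\n 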
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin)\n\n def __iter__(self):\n for name, options in self.fieldsets:\n yield InlineFieldset(\n self.formset, self.form, name, self.readonly_fields,\n model_admin=self.model_admin, **options\n )\n\n def needs_explicit_pk_field(self):\n # Auto fields are editable (oddly), so need to check for auto or non-editable pk\n if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:\n return True\n # Also search any parents for an auto field. (The pk info is propagated to child\n # models so that does not need to be checked in parents.)\n for parent in self.form._meta.model._meta.get_parent_list():\n if parent._meta.has_auto_field:\n return True\n return False\n\n def pk_field(self):\n return AdminField(self.form, self.formset._pk_field.name, False)\n\n def fk_field(self):\n fk = getattr(self.formset, \"fk\", None)\n if fk:\n return AdminField(self.form, fk.name, False)\n else:\n return \"\"\n\n def deletion_field(self):\n from django.forms.formsets import DELETION_FIELD_NAME\n return AdminField(self.form, DELETION_FIELD_NAME, False)\n\n def ordering_field(self):\n from django.forms.formsets import ORDERING_FIELD_NAME\n return AdminField(self.form, ORDERING_FIELD_NAME, False)\n\n\nclass InlineFieldset(Fieldset):\n def __init__(self, formset, *args, **kwargs):\n self.formset = formset\n super(InlineFieldset, self).__init__(*args, **kwargs)\n\n def __iter__(self):\n fk = getattr(self.formset, \"fk\", None)\n for field in self.fields:\n if fk and fk.name == field:\n continue\n yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)\n\n\nclass AdminErrorList(forms.utils.ErrorList):\n \"\"\"\n Stores all errors for the form/formsets in an add/change stage view.\n \"\"\"\n def __init__(self, form, inline_formsets):\n super(AdminErrorList, self).__init__()\n\n if form.is_bound:\n self.extend(form.errors.values())\n for inline_formset in inline_formsets:\n self.extend(inline_formset.non_form_errors())\n for errors_in_inline_form in inline_formset.errors:\n self.extend(errors_in_inline_form.values())\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2770,"cells":{"repo_name":{"kind":"string","value":"hoangt/gem5v"},"path":{"kind":"string","value":"src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py"},"copies":{"kind":"string","value":"18"},"size":{"kind":"string","value":"3743"},"content":{"kind":"string","value":"# Copyright (c) 2008 Princeton University\n# Copyright (c) 2009 Advanced Micro Devices, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met: redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer;\n# redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution;\n# neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A 
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Authors: Steve Reinhardt\n# Brad Beckmann\n\nfrom m5.params import *\nfrom m5.proxy import *\nfrom m5.SimObject import SimObject\nfrom BasicLink import BasicIntLink, BasicExtLink\n\nclass NetworkLink_d(SimObject):\n type = 'NetworkLink_d'\n link_id = Param.Int(Parent.link_id, \"link id\")\n link_latency = Param.Int(Parent.latency, \"link latency\")\n vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,\n \"virtual channels per virtual network\")\n virt_nets = Param.Int(Parent.number_of_virtual_networks,\n \"number of virtual networks\")\n channel_width = Param.Int(Parent.bandwidth_factor,\n \"channel width == bw factor\")\n\nclass CreditLink_d(NetworkLink_d):\n type = 'CreditLink_d'\n\n# Interior fixed pipeline links between routers\nclass GarnetIntLink_d(BasicIntLink):\n type = 'GarnetIntLink_d'\n # The detailed fixed pipeline bi-directional link include two main\n # forward links and two backward flow-control links, one per direction\n nls = []\n # In uni-directional link\n nls.append(NetworkLink_d()); \n # Out uni-directional link\n nls.append(NetworkLink_d());\n network_links = VectorParam.NetworkLink_d(nls, \"forward links\")\n\n cls = []\n # In uni-directional link\n cls.append(CreditLink_d());\n # Out uni-directional link\n cls.append(CreditLink_d());\n credit_links = VectorParam.CreditLink_d(cls, \"backward flow-control links\")\n\n# Exterior fixed pipeline links between a router and a controller\nclass GarnetExtLink_d(BasicExtLink):\n type = 'GarnetExtLink_d'\n # The detailed fixed pipeline bi-directional link include two main\n # forward links and two backward flow-control links, one per direction\n nls = []\n # In uni-directional link\n nls.append(NetworkLink_d());\n # Out uni-directional link\n nls.append(NetworkLink_d());\n network_links = VectorParam.NetworkLink_d(nls, \"forward links\")\n\n cls = []\n # In uni-directional link\n cls.append(CreditLink_d());\n # Out uni-directional link\n cls.append(CreditLink_d());\n credit_links = VectorParam.CreditLink_d(cls, \"backward flow-control links\")\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":2771,"cells":{"repo_name":{"kind":"string","value":"cryptickp/troposphere"},"path":{"kind":"string","value":"examples/CloudFront_S3.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"1622"},"content":{"kind":"string","value":"# Converted from CloudFront_S3.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import GetAtt, Join, Output\nfrom troposphere import Parameter, Ref, Template\nfrom troposphere.cloudfront import Distribution, DistributionConfig\nfrom troposphere.cloudfront import Origin, DefaultCacheBehavior\nfrom troposphere.cloudfront import ForwardedValues\n\n\nt = Template()\n\nt.add_description(\n \"AWS CloudFormation Sample Template CloudFront_S3: Sample template \"\n \"showing how to create an Amazon CloudFront distribution using an \"\n \"S3 origin. 
\"\n \"**WARNING** This template creates a CloudFront distribution. \"\n \"You will be billed for the AWS resources used if you create \"\n \"a stack from this template.\")\n\ns3dnsname = t.add_parameter(Parameter(\n \"S3DNSNAme\",\n Description=\"The DNS name of an existing S3 bucket to use as the \"\n \"Cloudfront distribution origin\",\n Type=\"String\",\n))\n\nmyDistribution = t.add_resource(Distribution(\n \"myDistribution\",\n DistributionConfig=DistributionConfig(\n Origins=[Origin(Id=\"Origin 1\", DomainName=Ref(s3dnsname))],\n DefaultCacheBehavior=DefaultCacheBehavior(\n TargetOriginId=\"Origin 1\",\n ForwardedValues=ForwardedValues(\n QueryString=False\n ),\n ViewerProtocolPolicy=\"allow-all\"),\n Enabled=True\n )\n))\n\nt.add_output([\n Output(\"DistributionId\", Value=Ref(myDistribution)),\n Output(\n \"DistributionName\",\n Value=Join(\"\", [\"http://\", GetAtt(myDistribution, \"DomainName\")])),\n])\n\nprint(t.to_json())\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":2772,"cells":{"repo_name":{"kind":"string","value":"nelmiux/CarnotKE"},"path":{"kind":"string","value":"jyhton/Lib/test/clamp.py"},"copies":{"kind":"string","value":"12"},"size":{"kind":"string","value":"2254"},"content":{"kind":"string","value":"import java\nimport os\nimport os.path\n\nfrom java.lang.reflect import Modifier\nfrom org.python.util import CodegenUtils\nfrom org.python.compiler import CustomMaker, ProxyCodeHelpers\n\n\n__all__ = [\"PackageProxy\", \"SerializableProxies\"]\n\n\nclass SerializableProxies(CustomMaker):\n\n # NOTE: SerializableProxies is itself a java proxy, but it's not a custom one!\n\n serialized_path = None\n \n def doConstants(self):\n self.classfile.addField(\"serialVersionUID\",\n CodegenUtils.ci(java.lang.Long.TYPE), Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL)\n code = self.classfile.addMethod(\"\", ProxyCodeHelpers.makeSig(\"V\"), Modifier.STATIC)\n code.visitLdcInsn(java.lang.Long(1))\n code.putstatic(self.classfile.name, \"serialVersionUID\", CodegenUtils.ci(java.lang.Long.TYPE))\n code.return_()\n\n def saveBytes(self, bytes):\n if self.serialized_path:\n path = os.path.join(self.serialized_path, os.path.join(*self.myClass.split(\".\")) + \".class\")\n parent = os.path.dirname(path)\n try:\n os.makedirs(parent)\n except OSError:\n pass # Directory exists\n with open(path, \"wb\") as f:\n f.write(bytes.toByteArray())\n\n def makeClass(self):\n try:\n # If already defined on CLASSPATH, simply return this class\n cls = java.lang.Class.forName(self.myClass)\n print \"Class defined on CLASSPATH\", cls\n except:\n # Otherwise build it\n cls = CustomMaker.makeClass(self)\n return cls\n\n\nclass PackageProxy(object):\n\n def __init__(self, package):\n self.package = package\n \n def __call__(self, superclass, interfaces, className, pythonModuleName, fullProxyName, mapping):\n \"\"\"Constructs a usable proxy name that does not depend on ordering\"\"\"\n if \".\" in pythonModuleName:\n # get around that will be called differently from regrtest, as test.module instead of module\n pythonModuleName = pythonModuleName.split(\".\")[-1]\n return SerializableProxies(superclass, interfaces, className, pythonModuleName, self.package + \".\" + pythonModuleName + \".\" + className, 
mapping)\n\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2773,"cells":{"repo_name":{"kind":"string","value":"minifirocks/nifi-minifi-cpp"},"path":{"kind":"string","value":"thirdparty/rocksdb/buckifier/targets_cfg.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"3002"},"content":{"kind":"string","value":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nrocksdb_target_header = \"\"\"\nimport os\n\nTARGETS_PATH = os.path.dirname(__file__)\nREPO_PATH = \"rocksdb/src/\"\nBUCK_BINS = \"buck-out/gen/\" + REPO_PATH\nTEST_RUNNER = REPO_PATH + \"buckifier/rocks_test_runner.sh\"\nrocksdb_compiler_flags = [\n \"-fno-builtin-memcmp\",\n \"-DROCKSDB_PLATFORM_POSIX\",\n \"-DROCKSDB_LIB_IO_POSIX\",\n \"-DROCKSDB_FALLOCATE_PRESENT\",\n \"-DROCKSDB_MALLOC_USABLE_SIZE\",\n \"-DROCKSDB_RANGESYNC_PRESENT\",\n \"-DROCKSDB_SCHED_GETCPU_PRESENT\",\n \"-DROCKSDB_SUPPORT_THREAD_LOCAL\",\n \"-DOS_LINUX\",\n # Flags to enable libs we include\n \"-DSNAPPY\",\n \"-DZLIB\",\n \"-DBZIP2\",\n \"-DLZ4\",\n \"-DZSTD\",\n \"-DGFLAGS=gflags\",\n \"-DNUMA\",\n \"-DTBB\",\n # Needed to compile in fbcode\n \"-Wno-expansion-to-defined\",\n]\n\nrocksdb_external_deps = [\n ('bzip2', None, 'bz2'),\n ('snappy', None, \"snappy\"),\n ('zlib', None, 'z'),\n ('gflags', None, 'gflags'),\n ('lz4', None, 'lz4'),\n ('zstd', None),\n ('tbb', None),\n (\"numa\", None, \"numa\"),\n (\"googletest\", None, \"gtest\"),\n]\n\nrocksdb_preprocessor_flags = [\n # Directories with files for #include\n \"-I\" + REPO_PATH + \"include/\",\n \"-I\" + REPO_PATH,\n]\n\nrocksdb_arch_preprocessor_flags = {\n \"x86_64\": [\"-DHAVE_SSE42\"],\n}\n\"\"\"\n\n\nlibrary_template = \"\"\"\ncpp_library(\n name = \"%s\",\n headers = %s,\n srcs = [%s],\n deps = [%s],\n preprocessor_flags = rocksdb_preprocessor_flags,\n arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,\n compiler_flags = rocksdb_compiler_flags,\n external_deps = rocksdb_external_deps,\n)\n\"\"\"\n\nbinary_template = \"\"\"\ncpp_binary(\n name = \"%s\",\n srcs = [%s],\n deps = [%s],\n preprocessor_flags = rocksdb_preprocessor_flags,\n arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,\n compiler_flags = rocksdb_compiler_flags,\n external_deps = rocksdb_external_deps,\n)\n\"\"\"\n\nunittests_template = \"\"\"\n# [test_name, test_src, test_type]\nROCKS_TESTS = %s\n\n\n# Generate a test rule for each entry in ROCKS_TESTS\nfor test_cfg in ROCKS_TESTS:\n test_name = test_cfg[0]\n test_cc = test_cfg[1]\n ttype = \"gtest\" if test_cfg[2] == \"parallel\" else \"simple\"\n test_bin = test_name + \"_bin\"\n\n cpp_binary (\n name = test_bin,\n srcs = [test_cc],\n deps = [\":rocksdb_test_lib\"],\n preprocessor_flags = rocksdb_preprocessor_flags,\n arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,\n compiler_flags = rocksdb_compiler_flags,\n external_deps = rocksdb_external_deps,\n )\n\n custom_unittest(\n name = test_name,\n type = ttype,\n deps = [\":\" + test_bin],\n command = [TEST_RUNNER, BUCK_BINS + test_bin]\n )\n\ncustom_unittest(\n name = \"make_rocksdbjavastatic\",\n type = \"simple\",\n command = [\"internal_repo_rocksdb/make_rocksdbjavastatic.sh\"],\n)\n\ncustom_unittest(\n name = \"make_rocksdb_lite_release\",\n type = \"simple\",\n command = 
[\"internal_repo_rocksdb/make_rocksdb_lite_release.sh\"],\n)\n\"\"\"\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":2774,"cells":{"repo_name":{"kind":"string","value":"YufeiZhang/Principles-of-Programming-Python-3"},"path":{"kind":"string","value":"Lectures/Lecture_6/k_means_clustering.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10239"},"content":{"kind":"string","value":"# Written by Eric Martin for COMP9021\n\n\nimport tkinter as tk\nimport tkinter.messagebox\n\n\nclass KMeansClustering(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.title('k-means clustering')\n menubar = tk.Menu()\n help_menu = tk.Menu(menubar)\n menubar.add_cascade(label = 'k-means Clustering Help', menu = help_menu)\n help_menu.add_command(label = 'Principle', command = self.principle_help)\n help_menu.add_command(label = 'Clearing', command = self.clearing_help)\n help_menu.add_command(label = 'Creating points and initial centroids',\n command = self.creating_points_and_initial_centroids_help)\n self.config(menu = menubar)\n\n self.space = Space()\n buttons = tk.Frame(bd = 20)\n self.configure_space_or_cluster_button = tk.Button(buttons, text = 'Cluster', width = 5,\n command = self.configure_space_or_cluster)\n self.configure_space_or_cluster_button.pack(padx = 30, side = tk.LEFT)\n self.clear_or_iterate_button = tk.Button(buttons, text = 'Clear', width = 5,\n command = self.clear_or_iterate)\n self.clear_or_iterate_button.pack(padx = 30)\n buttons.pack()\n self.space.pack()\n self.clustering = False\n\n def principle_help(self):\n tkinter.messagebox.showinfo('Principle',\n 'k, a positive integer which here can only be at most equal to 6, represents '\n 'the number of clusters to be created.\\n\\n'\n 'After the user has created a number of (round) points, the button displaying \"Cluster\" '\n 'can be clicked, and then the user can create k (square) points, or \"centroids\", '\n 'displayed in different colors.\\n'\n 'Clicking the button displaying \"Iterate\" gives each point the colour of the closest '\n 'centroid, making that point a member of the cluster associated with that colour.\\n\\n'\n 'The centre of gravity of each cluster then becomes the new centroid. '\n 'The same computation can be done again by clicking the button displaying \"Iterate\", '\n 'until the clusters do not change any more, in which case the button labels change and '\n 'the user is in a position to run another experiment.\\n\\n'\n 'The user can also click the button displaying \"Stop\" to get back to that position, and '\n 'change her mind by clicking again on the button displaying \"Cluster\".')\n\n def clearing_help(self):\n tkinter.messagebox.showinfo('Clearing',\n 'In case centroids are displayed, clicking the \"Clear\" button deletes the centroids, and '\n 'if the points are coloured because they have been clustered, then they lose their '\n 'colour.\\n\\n'\n 'In case no centroid is displayed, possibly because the \"Clear\" button has just been '\n 'clicked, then clicking the \"Clear\" button deletes all points.')\n\n def creating_points_and_initial_centroids_help(self):\n tkinter.messagebox.showinfo('Creating points and initial centroids',\n 'Points and initial centroids are created simply by clicking in the grey area.\\n'\n 'Clicking on an existing point or initial centroid deletes it.\\n'\n 'No point or centroid is created when it is too close to an existing point or centroid, '\n 'respectively.\\n\\n'\n 'There can be at most 6 centroids. 
Trying to create more will have no effect.')\n\n def configure_space_or_cluster(self):\n if self.clustering:\n self.configure_space_or_cluster_button.config(text = 'Cluster')\n self.clear_or_iterate_button.config(text = 'Clear')\n self.clustering = False\n self.space.clustering = False\n self.space.nb_of_clusters = 0\n else:\n self.configure_space_or_cluster_button.config(text = 'Stop')\n self.clear_or_iterate_button.config(text = 'Iterate')\n self.clustering = True\n self.space.clustering = True\n\n def clear_or_iterate(self):\n if self.clustering:\n if not self.space.iterate():\n self.configure_space_or_cluster()\n else:\n self.space.clear()\n\n\nclass Space(tk.Frame):\n space_dim = 600\n space_colour = '#F5F5F5'\n point_colour = '#808080'\n\n def __init__(self):\n tk.Frame.__init__(self, padx = 20, pady = 20)\n self.space = tk.Canvas(self, width = self.space_dim, height = self.space_dim, bg = self.space_colour)\n self.space.bind('<1>', self.act_on_click)\n self.space.pack()\n self.points = {}\n self.centroids = {}\n self.colours = 'red', 'green', 'blue', 'cyan', 'black', 'magenta'\n self.available_colours = list(self.colours)\n self.clustering = False\n\n def clear(self):\n if self.centroids:\n for centroid_coordinates in self.centroids:\n self.space.itemconfig(self.centroids[centroid_coordinates].drawn_point, fill = '',\n outline = '')\n self.centroids.clear()\n for point_coordinates in self.points:\n self.points[point_coordinates].colour = self.point_colour\n self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = self.point_colour,\n outline = self.point_colour)\n self.available_colours = list(self.colours)\n else:\n for point_coordinates in self.points:\n self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = '',\n outline = '')\n self.points.clear()\n\n def act_on_click(self, event):\n x = self.space.canvasx(event.x)\n y = self.space.canvasx(event.y)\n if x < 10 or x > self.space_dim - 5 or y < 10 or y > self.space_dim - 5:\n return\n coordinates = x, y\n if self.clustering:\n if (self.request_point_otherwise_delete_or_ignore(coordinates, self.centroids, 8) and\n self.available_colours):\n colour = self.available_colours.pop()\n self.centroids[coordinates] = Point(self.draw_centroid(x, y, colour), colour)\n else:\n if self.request_point_otherwise_delete_or_ignore(coordinates, self.points, 25):\n self.points[coordinates] = Point(self.space.create_oval(x - 2, y - 2, x + 2, y + 2,\n fill = self.point_colour,\n outline = self.point_colour),\n self.point_colour)\n\n def request_point_otherwise_delete_or_ignore(self, coordinates, points, size):\n for point_coordinates in points:\n if self.square_of_distance(coordinates, point_coordinates) < size:\n self.space.itemconfig(points[point_coordinates].drawn_point, fill = '', outline = '')\n colour = points[point_coordinates].colour\n if colour != self.point_colour:\n self.available_colours.append(colour)\n del points[point_coordinates]\n return False\n if any(self.square_of_distance(coordinates, point_coordinates) < 4 * size\n for point_coordinates in points):\n return False\n return True\n\n def square_of_distance(self, coordinates_1, coordinates_2):\n return (coordinates_1[0] - coordinates_2[0]) ** 2 + (coordinates_1[1] - coordinates_2[1]) ** 2\n\n def iterate(self):\n clusters = {centroid_coordinates: [] for centroid_coordinates in self.centroids}\n if not clusters:\n return\n different_clustering = False\n for point_coordinates in self.points:\n min_square_of_distance = float('inf')\n for 
centroid_coordinates in self.centroids:\n square_of_distance = self.square_of_distance(point_coordinates, centroid_coordinates)\n if square_of_distance < min_square_of_distance:\n min_square_of_distance = square_of_distance\n closest_centroid_coordinates = centroid_coordinates\n colour = self.centroids[closest_centroid_coordinates].colour\n if self.points[point_coordinates].colour != colour:\n self.points[point_coordinates].colour = colour\n self.space.itemconfig(self.points[point_coordinates].drawn_point, fill = colour,\n outline = colour)\n different_clustering = True\n clusters[closest_centroid_coordinates].append(point_coordinates)\n for centroid_coordinates in clusters:\n nb_of_points = len(clusters[centroid_coordinates])\n if nb_of_points:\n x, y = tuple(map(sum, zip(*clusters[centroid_coordinates])))\n clusters[centroid_coordinates] = x / nb_of_points, y / nb_of_points\n for centroid_coordinates in self.centroids:\n self.space.itemconfig(self.centroids[centroid_coordinates].drawn_point, fill = '',\n outline = '')\n updated_centroids = {}\n for centroid_coordinates in clusters:\n if clusters[centroid_coordinates]:\n colour = self.centroids[centroid_coordinates].colour\n x, y = clusters[centroid_coordinates]\n updated_centroids[(x, y)] = Point(self.draw_centroid(x, y, colour), colour)\n self.centroids = updated_centroids\n return different_clustering\n\n def draw_centroid(self, x, y, colour):\n return self.space.create_rectangle(x - 1, y - 1, x + 1, y + 1, fill = colour, outline = colour)\n\n\nclass Point:\n def __init__(self, drawn_point, colour):\n self.drawn_point = drawn_point\n self.colour = colour\n\n\nif __name__ == '__main__':\n KMeansClustering().mainloop()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":2775,"cells":{"repo_name":{"kind":"string","value":"aerval/blast_comparison"},"path":{"kind":"string","value":"main.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"16353"},"content":{"kind":"string","value":"#!/bin/env python\n\n##############################################\n# CompareBLASTs #\n# A tool to compare the found hits from two #\n# BLAST searches with the same search query. #\n# #\n# by Philipp B. Rentzsch #\n# BCCDC Vancouver, BC #\n# October 2014 - January 2015 #\n# License: MIT #\n##############################################\n\nfrom __future__ import print_function\nfrom time import strptime # convert string into time object\nimport optparse # commandline parsing\nfrom blast_hit import * # BlastHit.py file\nimport string # for valid letters in filename\n\n\ndef load_blasthits(file):\n '''\n Read a tabular BLAST file into a list of BlastHits.\n\n file = (string) filename of tabular blast result file\n '''\n\n blastfile = open(file).readlines()\n hits = []\n # We can not extract every line from the tabular file into a single hit\n # since some correspont to multiple such hits\n for hit in blastfile:\n h = hit.split('\\t')\n if h[1] == h[12]:\n hits.append(BlastHit(hit))\n else:\n # When multiple gene ids contribute to the same alignment, they\n # can be summarized to one hit. 
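(In such lines h[1] carries only the first subject id while h[12] holds the whole semicolon-separated id list, which is why the h[1] == h[12] test above singles them out.) 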
In the following we split these\n # up because we want to check all hit seperately.\n subhits = h[12].split(';')\n for sub in subhits:\n h[1] = sub\n hits.append(BlastHit('\\t'.join(h)))\n return hits\n\n\nclass CompareBLASTs(object):\n\n def __init__(self, old_hits, new_hits, email, name):\n '''\n Initialize the comparison object.\n\n old_hits = List of BlastHits from the older BLAST Search\n new_hits = List of BlastHits from the newer, second BLAST Search\n email = Your email address, needed for use of NCBIs Enterez to prevent\n misuse of their service\n name = Query name that lead to the BlastHits to identify them later\n '''\n\n self.input_old_hits = old_hits\n self.input_new_hits = new_hits\n\n self.email = email\n self.name = name\n\n def compare(self):\n '''\n Compares the two lists of BlastHits for more or less similar elements\n and extracts those elements form both lists that have no companion in\n each other.\n '''\n\n # Compare for exact (or similar) hits.\n self.new_hits, self.old_hits = compare_blasts(self.input_new_hits,\n self.input_old_hits)\n\n # Retrieve basic information of coresponding genes for all old hits.\n self.oldGeneIDs = get_idlist(self.old_hits['all'], self.email)\n\n # Check all the old hits without a copy in the new hit list what\n # happend to their associated gene (whether it still exists, was\n # updated (=replaced) or deleted (=suppressed).\n oldOnly = {'live': [], 'replaced': [], 'suppressed': []}\n # A bit confusing: live and lost are handled here equivalent since a\n # hit that is live (=still existing in the db) but not found in the\n # new BLAST search was 'lost' at some point.\n for hit in self.old_hits['unknown']:\n for ID in hit.ids:\n if ID.db == 'gi':\n oldOnly[self.oldGeneIDs[ID.num]['Status']].append(hit)\n hit.status = self.oldGeneIDs[ID.num]['Status']\n break\n\n self.new_hits['replacement'] = [] # Equivalent to old_hits 'replaced'\n self.old_hits['lost'] = oldOnly['live']\n self.old_hits['suppressed'] = oldOnly['suppressed']\n self.old_hits['replacement'] = []\n\n # Check the old hits with a known replacement tag, whether a replacing\n # hit can be found within the new hits.\n for num, hit in enumerate(oldOnly['replaced']):\n for ID in hit.ids:\n if ID.db == 'gi':\n new_id = self.oldGeneIDs[ID.num]['ReplacedBy']\n found = False\n for num2, hit2 in enumerate(self.new_hits['unknown']):\n if new_id in [ID.num for ID in hit2.ids]:\n same, differences = hit.compare_hit(hit2, check_ids=False)\n if same:\n rep = self.new_hits['unknown'].pop(num2)\n rep.status = 'replacement'\n self.new_hits['replacement'].append(rep)\n self.old_hits['replacement'].append(\n oldOnly['replaced'][num])\n found = True\n break\n if not found:\n # Hit can be replaced but the replacement was nevertheless not\n # found in the new Blast Search => lost/live.\n self.old_hits['lost'].append(oldOnly['replaced'][num])\n oldOnly['replaced'][num].status = 'live'\n\n # Get the basic info for those hit in the new search, that have no\n # know relative in the old search.\n self.newGeneIDs = get_idlist(self.new_hits['unknown'], self.email)\n\n # Estimate the time of the old BLAST (or last used database update)\n # search by looking for the creation of the youngest entree that match\n # to the old hits.\n date_oldsearch = max([strptime(record['CreateDate'], '%Y/%m/%d')\n for record in self.oldGeneIDs.values()])\n\n # Check wether all new hits with no relative in the old Search are\n # indeed new (there for created after the last of the old Hits). 
I\n # never had this case but one can never know ...\n self.new_hits['new'] = []\n self.new_hits['old'] = []\n for hit in self.new_hits['unknown']:\n if strptime(self.newGeneIDs[hit.ids[0].num]['CreateDate'],\n '%Y/%m/%d') < date_oldsearch:\n self.new_hits['old'].append(hit)\n hit.status = 'strange'\n else:\n self.new_hits['new'].append(hit)\n hit.status = 'new'\n\n def output_comparison(self, output_types=[lambda x: print(x)], top=0,\n long_output=False, adaptive=True):\n '''\n Prints (and or writes to a file) the output of the BLAST comparison.\n\n output_types = List of output lambdas like 'lambda x: print(x)' and\n 'lambda x: output_file.write(''.join([x, '\\n']))'\n top = The number of Hits (from the top score) that are of interest for\n the comparion (0 = all)\n long_output = A longer, more describitive output\n adaptive = In adaptive mode only those categories are displayed that\n appear like if there are no new hits in the second BLAST, this is not\n dispalyed\n '''\n\n # Determine the number of hits (in the interested interval) that\n # belong to each category.\n hits_per_category = {'equal': 0, 'similar': 0, 'live': 0,\n 'replaced': 0, 'suppressed': 0, 'new': 0,\n 'strange': 0}\n\n if top == 0: # Count all hits\n top_old = len(self.old_hits['all'])\n top_new = len(self.new_hits['all'])\n else: # Count only the specified fraction of hits\n top_old = min(top, len(self.old_hits['all']))\n top_new = min(top, len(self.new_hits['all']))\n\n for hit in self.old_hits['all'][:top_old]:\n hits_per_category[hit.status] += 1\n for hit in self.new_hits['all'][:top_new]:\n if hit.status in ['new', 'strange']:\n hits_per_category[hit.status] += 1\n\n if long_output:\n category_names = {\n 'equal': 'Found in both BLASTs results:\\t%i',\n 'similar': 'Found in both BLASTs results with slight \\\n changes:\\t%i',\n 'live': 'Not showing up for unknown reasons in the second \\\n BLAST (probably low scores):\\t%i',\n 'replaced': 'Replaced/updated before the second BLAST:\\t%i',\n 'suppressed': 'Deleted/suppressed before the second BLAST:\\t\\\n %i',\n 'new': 'New hits added to the database for the second BLAST:\\t\\\n %i',\n 'strange': 'Hits that do only appear in the second BLAST \\\n that should have appeared in the first:\\t%i'}\n else:\n category_names = {\n 'equal': 'Equal Hits:\\t%i',\n 'similar': 'Changed Hits\\t%i',\n 'live': 'Lost Hits\\t%i',\n 'replaced': 'Replaced Hits:\\t%i',\n 'suppressed': 'Deleted Hits:\\t%i',\n 'new': 'New Hits:\\t%i',\n 'strange': 'New appearing Hits:\\t%i'}\n\n # For the different output channels (write to file or print).\n for output in output_types:\n # Always print the query name as more than one query can be found\n # in a single BLAST.\n if self.name:\n output('Query:\\t%s' % self.name)\n\n if long_output:\n output('Total hits in old search:\\t%i' %\n len(self.old_hits['all']))\n output('Total hits in new search:\\t%i' %\n len(self.new_hits['all']))\n\n if top_old != len(self.old_hits['all']) and \\\n top_new != len(self.new_hits['all']):\n output('Among the top %i hits were:' % top)\n else:\n output('From all hits were:')\n\n for key in ['equal', 'similar', 'live', 'replaced', 'suppressed',\n 'new', 'strange']:\n if not adaptive or hits_per_category[key] > 0:\n # In (default) adaptive mode, only those hit categories\n # are displayed that appear (example: if there is no\n # replaced hit, the replaced hits column is not displayed.\n output(category_names[key] % hits_per_category[key])\n\n # separate from following queries\n output('\\n')\n\n def 
export_hit_categories(self, categories, path=''):\n '''\n Exports the given categories into files (format similar to the input\n .blast format with a status column added at the end).\n \n categories = String with comma ',' delimited categories (e.g: new,\n all_old to export all new Hits and all the hits from the old search)\n path = file path to the exported files\n '''\n \n categories = categories.split(',')\n \n # Generate valid filenames:\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n name = ''.join(c for c in self.name if c in valid_chars)\n\n for category in categories:\n hits = None\n if category == 'new':\n hits = self.new_hits['new']\n if category == 'equal':\n hits = self.old_hits['same']\n if category == 'similar':\n hits = self.old_hits['similar']\n if category == 'live':\n hits = self.old_hits['lost']\n if category == 'replaced':\n hits = self.old_hits['replacement']\n if category == 'suppressed':\n hits = self.old_hits['suppressed']\n if category == 'all_old':\n hits = self.old_hits['all']\n if category == 'all_new':\n hits = self.new_hits['all']\n if category == 'strange':\n hits = self.new_hits['old']\n if hits:\n with open(path + name + '_' + category + '.blast', 'w+') as f:\n # The query name and category speciefies the file name\n # (e.g. Query7_all_new.blast).\n for hit in hits:\n f.write(str(hit) + '\\n')\n else:\n print(\"Unknown export category %s\" % category)\n\n\ndef perform_comparison(opts):\n '''\n The main function that compares two BLAST files against the same Query\n Sequence\n\n opts = parsed OptionsParser\n '''\n\n new_hits = {}\n old_hits = {}\n # Load the hits from the two input files.\n new_hits_all = load_blasthits(opts.new_Blast)\n old_hits_all = load_blasthits(opts.old_Blast)\n\n # Sort all hits for their repective query (as one BLAST file can contain\n # multiple queries.\n for hit in new_hits_all:\n if hit.name in new_hits.keys():\n new_hits[hit.name].append(hit)\n else:\n new_hits[hit.name] = [hit]\n\n for hit in old_hits_all:\n if hit.name in old_hits.keys():\n old_hits[hit.name].append(hit)\n else:\n old_hits[hit.name] = [hit]\n\n # Make sure that both files where against the same queries.\n assert old_hits.keys() == new_hits.keys()\n\n # Define how to output the (general) results (print to console and/or save\n # to file).\n output_types = []\n if opts.verbose:\n output_types.append(lambda x: print(x))\n if opts.save_output:\n output_file = open(opts.output_path + opts.save_output, 'w+')\n output_types.append(lambda x: output_file.write(''.join([x, '\\n'])))\n # Somewhat complicated expression because file.write does not\n # automatically add a line end character.\n\n for key in old_hits.keys():\n blastComparison = CompareBLASTs(old_hits[key], new_hits[key],\n opts.email, key)\n blastComparison.compare()\n blastComparison.output_comparison(output_types, opts.top,\n opts.long_output, opts.adaptive)\n\n # Export specified hit categories to file.\n if opts.export:\n blastComparison.export_hit_categories(opts.export,\n path=opts.output_path)\n\n if opts.save_output:\n output_file.close()\n\nif __name__ == '__main__':\n\n # General description of the program\n usage = '''\n %prog [options]\n Neccessary to provide are the two tabular BLAST files old (-o) and new\n (-n)\n '''\n\n op = optparse.OptionParser(usage=usage)\n op.add_option('-o', '--old', default=None, dest='old_Blast',\n help='the older tabular BLAST file (24 columns)')\n op.add_option('-n', '--new', default=None, dest='new_Blast',\n help='the newer BLAST file')\n 
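# Example invocation (the file names here are hypothetical):\n # python main.py -o old.blast -n new.blast -t 50 -x new,suppressed\n 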
op.add_option('-t', '--top', type='int', default=0,\n help='specify when only the top X (integer value) hits for \\\n each query are of interest')\n op.add_option('-v', '--verbose', action='store_true', dest='verbose',\n default=True, help='print everything')\n op.add_option('-q', '--quiet', action='store_false', dest='verbose',\n help='stay quiet')\n op.add_option('-s', '--save', default=None, dest='save_output',\n help='file where the output is saved')\n op.add_option('-p', '--put', default='', dest='output_path',\n help='the path where the saved output and/or exported hit \\\n files are stored')\n op.add_option('-l', '--longOutput', action='store_true',\n dest='long_output', default=False,\n help='enable long names in the output')\n op.add_option('-a', '--adaptive', action='store_true',\n dest='adaptive', default=True,\n help='only display those hit classes, that have elements')\n op.add_option('-A', '--notAdaptive', action='store_false',\n dest='adaptive', help='display all elements')\n op.add_option('-e', '--email', default='test@test.com',\n help='email address of the user to send him/her notice of \\\n excess use')\n op.add_option('-x', '--export', default=None,\n help='export specified hit categories (Example: \\\n \"-x new,old_all,suppressed\", Categories: \"equal, similar, \\\n live, replaced, suppressed, new, strange, all_old and \\\n all_new)\"')\n\n opts, args = op.parse_args()\n\n assert opts.old_Blast and opts.new_Blast\n\n # Executes the analysing program\n perform_comparison(opts)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2776,"cells":{"repo_name":{"kind":"string","value":"makinacorpus/pygal"},"path":{"kind":"string","value":"pygal/test/test_interpolate.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"3200"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# This file is part of pygal\n#\n# A python svg graph plotting library\n# Copyright © 2012-2014 Kozea\n#\n# This library is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation, either version 3 of the License, or (at your option) any\n# later version.\n#\n# This library is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with pygal. 
If not, see .\n\nfrom pygal.test import make_data\n\n\ndef test_cubic(Chart, datas):\n chart = Chart(interpolate='cubic')\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_cubic_prec(Chart, datas):\n chart = Chart(interpolate='cubic', interpolation_precision=200)\n chart = make_data(chart, datas)\n\n chart_low = Chart(interpolate='cubic', interpolation_precision=5)\n chart_low = make_data(chart, datas)\n\n assert len(chart.render()) >= len(chart_low.render())\n\n\ndef test_quadratic(Chart, datas):\n chart = Chart(interpolate='quadratic')\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_lagrange(Chart, datas):\n chart = Chart(interpolate='lagrange')\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_trigonometric(Chart, datas):\n chart = Chart(interpolate='trigonometric')\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_hermite(Chart, datas):\n chart = Chart(interpolate='hermite')\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_hermite_finite(Chart, datas):\n chart = Chart(interpolate='hermite',\n interpolation_parameters={'type': 'finite_difference'})\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_hermite_cardinal(Chart, datas):\n chart = Chart(interpolate='hermite',\n interpolation_parameters={'type': 'cardinal', 'c': .75})\n chart = make_data(chart, datas)\n assert chart.render()\n\ndef test_hermite_catmull_rom(Chart, datas):\n chart = Chart(interpolate='hermite',\n interpolation_parameters={'type': 'catmull_rom'})\n chart = make_data(chart, datas)\n assert chart.render()\n\n\ndef test_hermite_kochanek_bartels(Chart, datas):\n chart = Chart(interpolate='hermite',\n interpolation_parameters={\n 'type': 'kochanek_bartels', 'b': -1, 'c': 1, 't': 1})\n chart = make_data(chart, datas)\n assert chart.render()\n\n chart = Chart(interpolate='hermite',\n interpolation_parameters={\n 'type': 'kochanek_bartels', 'b': -1, 'c': -8, 't': 0})\n chart = make_data(chart, datas)\n assert chart.render()\n\n chart = Chart(interpolate='hermite',\n interpolation_parameters={\n 'type': 'kochanek_bartels', 'b': 0, 'c': 10, 't': -1})\n chart = make_data(chart, datas)\n assert chart.render()\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":2777,"cells":{"repo_name":{"kind":"string","value":"jit/pyew"},"path":{"kind":"string","value":"pymsasid/decode.py"},"copies":{"kind":"string","value":"16"},"size":{"kind":"string","value":"31254"},"content":{"kind":"string","value":"# -----------------------------------------------------------------------------\n# decode.py\n#\n# author: matthieu.kaczmarek@mines-nancy.org\n# Mainly rewrited from udis86 -- Vivek Mohan \n# -----------------------------------------------------------------------------\n\nfrom common import DecodeException, VENDOR_INTEL, VENDOR_AMD\nfrom inst import Inst, Operand, Ptr, ie_invalid, ie_pause, ie_nop\n\n\n# this is intended: hundreds of constants used\nfrom itab import *\nfrom operand import *\n\n# Extracts instruction prefixes.\ndef get_prefixes(u, inst):\n have_pfx = 1\n\n # if in error state, bail out\n if u.error:\n return -1\n\n # keep going as long as there are prefixes available\n i = 0\n while have_pfx:\n\n # Get next byte.\n u.input.next() \n if u.error: \n return -1\n curr = u.input.current()\n\n # rex prefixes in 64bit mode\n if u.dis_mode == 64 and (curr & 0xF0) == 0x40:\n inst.pfx.rex = curr\n else:\n if curr == 0x2E:\n inst.pfx.seg = 'cs' \n inst.pfx.rex = 0\n elif curr == 0x36: \n 
inst.pfx.seg = 'ss' \n inst.pfx.rex = 0\n elif curr == 0x3E: \n inst.pfx.seg = 'ds' \n inst.pfx.rex = 0\n elif curr == 0x26: \n inst.pfx.seg = 'es' \n inst.pfx.rex = 0\n elif curr == 0x64: \n inst.pfx.seg = 'fs' \n inst.pfx.rex = 0\n elif curr == 0x65: \n inst.pfx.seg = 'gs' \n inst.pfx.rex = 0\n elif curr == 0x67: #adress-size override prefix \n inst.pfx.adr = 0x67\n inst.pfx.rex = 0\n elif curr == 0xF0: \n inst.pfx.lock = 0xF0\n inst.pfx.rex = 0\n elif curr == 0x66: \n # the 0x66 sse prefix is only effective if no other sse prefix\n # has already been specified.\n if inst.pfx.insn == 0:\n inst.pfx.insn = 0x66\n inst.pfx.opr = 0x66 \n inst.pfx.rex = 0\n elif curr == 0xF2:\n inst.pfx.insn = 0xF2\n inst.pfx.repne = 0xF2 \n inst.pfx.rex = 0\n elif curr == 0xF3:\n inst.pfx.insn = 0xF3\n inst.pfx.rep = 0xF3 \n inst.pfx.repe = 0xF3 \n inst.pfx.rex = 0\n else: \n # No more prefixes\n have_pfx = 0\n\n # check if we reached max instruction length\n if(i + 1) == MAX_INSN_LENGTH:\n u.error = 1\n i += 1\n\n # return status\n if u.error:\n return -1 \n\n # rewind back one byte in stream, since the above loop \n # stops with a non-prefix byte. \n u.input.back()\n\n # speculatively determine the effective operand mode,\n # based on the prefixes and the current disassembly\n # mode. This may be inaccurate, but useful for mode\n # dependent decoding.\n if u.dis_mode == 64:\n if REX_W(inst.pfx.rex):\n inst.opr_mode = 64 \n elif inst.pfx.opr:\n inst.opr_mode = 16\n elif(P_DEF64(inst.itab_entry.prefix)):\n inst.opr_mode = 64\n else:\n inst.opr_mode = 32\n if inst.pfx.adr:\n inst.adr_mode = 32 \n else: \n inst.adr_mode = 64 \n elif u.dis_mode == 32:\n if inst.pfx.opr:\n inst.opr_mode = 16\n else:\n inst.opr_mode = 32\n if inst.pfx.adr:\n inst.adr_mode = 16 \n else: \n inst.adr_mode = 32 \n elif u.dis_mode == 16:\n if inst.pfx.opr:\n inst.opr_mode = 32\n else:\n inst.opr_mode = 16\n if inst.pfx.adr:\n inst.adr_mode = 32 \n else: \n inst.adr_mode = 16 \n return 0\n\n\n# Searches the instruction tables for the right entry.\ndef search_itab(u, inst):\n # if in state of error, return \n did_peek = 0\n if u.error:\n return -1\n\n # get first byte of opcode\n u.input.next() \n if u.error:\n return -1\n curr = u.input.current() \n if curr == None :\n inst.itab_entry = ie_invalid\n inst.operator = inst.itab_entry.operator\n return 0 \n\n # resolve xchg, nop, pause crazyness\n if 0x90 == curr:\n if not(u.dis_mode == 64 and REX_B(inst.pfx.rex)):\n if(inst.pfx.rep):\n inst.pfx.rep = 0\n e = ie_pause\n else:\n e = ie_nop\n inst.itab_entry = e\n inst.operator = inst.itab_entry.operator\n return 0\n\n # get top-level table\n elif 0x0F == curr:\n table = ITAB__0F\n curr = u.input.next()\n if u.error:\n return -1\n # 2byte opcodes can be modified by 0x66, F3, and F2 prefixes\n if 0x66 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSE66__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSE66__0F\n inst.pfx.opr = 0\n elif 0xF2 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSEF2__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSEF2__0F\n inst.pfx.repne = 0\n elif 0xF3 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSEF3__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSEF3__0F\n inst.pfx.repe = 0\n inst.pfx.rep = 0\n # pick an instruction from the 1byte table\n else:\n table = ITAB__1BYTE\n\n index = curr\n\n while True:\n e = itab_list[ table ][ index ]\n\n # if operator constant is a standard instruction constant\n # our search is over.\n \n if e.operator in operator:\n if e.operator == 'invalid':\n if did_peek:\n 
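# the modrm byte was only peeked at above; consume it here so the\n # input position stays consistent\n 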
# Searches the instruction tables for the right entry.\ndef search_itab(u, inst):\n # if in state of error, return \n did_peek = 0\n if u.error:\n return -1\n\n # get first byte of opcode\n u.input.next() \n if u.error:\n return -1\n curr = u.input.current() \n if curr is None:\n inst.itab_entry = ie_invalid\n inst.operator = inst.itab_entry.operator\n return 0 \n\n # resolve xchg, nop, pause craziness\n if 0x90 == curr:\n if not(u.dis_mode == 64 and REX_B(inst.pfx.rex)):\n if(inst.pfx.rep):\n inst.pfx.rep = 0\n e = ie_pause\n else:\n e = ie_nop\n inst.itab_entry = e\n inst.operator = inst.itab_entry.operator\n return 0\n\n # get top-level table\n elif 0x0F == curr:\n table = ITAB__0F\n curr = u.input.next()\n if u.error:\n return -1\n # 2byte opcodes can be modified by 0x66, F3, and F2 prefixes\n if 0x66 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSE66__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSE66__0F\n inst.pfx.opr = 0\n elif 0xF2 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSEF2__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSEF2__0F\n inst.pfx.repne = 0\n elif 0xF3 == inst.pfx.insn:\n if itab_list[ITAB__PFX_SSEF3__0F][curr].operator != 'invalid':\n table = ITAB__PFX_SSEF3__0F\n inst.pfx.repe = 0\n inst.pfx.rep = 0\n # pick an instruction from the 1byte table\n else:\n table = ITAB__1BYTE\n\n index = curr\n\n while True:\n e = itab_list[ table ][ index ]\n\n # if operator constant is a standard instruction constant\n # our search is over.\n \n if e.operator in operator:\n if e.operator == 'invalid':\n if did_peek:\n u.input.next() \n if u.input.error:\n raise DecodeException('error') \n #return -1\n inst.itab_entry = e\n inst.operator = inst.itab_entry.operator\n return 0\n \n table = e.prefix\n \n if e.operator == 'grp_reg':\n peek = u.input.peek()\n did_peek = 1\n index = MODRM_REG(peek)\n elif e.operator == 'grp_mod':\n peek = u.input.peek()\n did_peek = 1\n index = MODRM_MOD(peek)\n if index == 3:\n index = ITAB__MOD_INDX__11\n else:\n index = ITAB__MOD_INDX__NOT_11\n elif e.operator == 'grp_rm':\n curr = u.input.next()\n did_peek = 0\n if u.error:\n return -1\n index = MODRM_RM(curr)\n \n elif e.operator == 'grp_x87':\n curr = u.input.next()\n did_peek = 0\n if u.error:\n return -1\n index = curr - 0xC0\n \n elif e.operator == 'grp_osize':\n if inst.opr_mode == 64:\n index = ITAB__MODE_INDX__64\n elif inst.opr_mode == 32: \n index = ITAB__MODE_INDX__32\n else:\n index = ITAB__MODE_INDX__16\n \n elif e.operator == 'grp_asize':\n if inst.adr_mode == 64:\n index = ITAB__MODE_INDX__64\n elif inst.adr_mode == 32: \n index = ITAB__MODE_INDX__32\n else:\n index = ITAB__MODE_INDX__16\n \n elif e.operator == 'grp_mode':\n if u.dis_mode == 64:\n index = ITAB__MODE_INDX__64\n elif u.dis_mode == 32:\n index = ITAB__MODE_INDX__32\n else:\n index = ITAB__MODE_INDX__16\n \n elif e.operator == 'grp_vendor':\n if u.vendor == VENDOR_INTEL: \n index = ITAB__VENDOR_INDX__INTEL\n elif u.vendor == VENDOR_AMD:\n index = ITAB__VENDOR_INDX__AMD\n else:\n raise DecodeException('unrecognized vendor id')\n \n elif e.operator == 'd3vil':\n raise DecodeException('invalid instruction operator constant Id3vil')\n else:\n raise DecodeException('invalid instruction operator constant')\n \n inst.itab_entry = e\n inst.operator = inst.itab_entry.operator\n return 0\n\ndef resolve_operand_size(u, inst, s):\n if s == SZ_V:\n return inst.opr_mode\n elif s == SZ_Z: \n if inst.opr_mode == 16:\n return 16\n else:\n return 32\n elif s == SZ_P: \n if inst.opr_mode == 16:\n return SZ_WP\n else:\n return SZ_DP\n elif s == SZ_MDQ:\n if inst.opr_mode == 16:\n return 32\n else:\n return inst.opr_mode\n elif s == SZ_RDQ:\n if u.dis_mode == 64:\n return 64\n else:\n return 32\n else:\n return s\n\n\ndef resolve_operator(u, inst):\n # far/near flags \n inst.branch_dist = None\n # readjust operand sizes for call/jmp instructions \n if inst.operator == 'call' or inst.operator == 'jmp':\n # WP: 16bit pointer \n if inst.operand[0].size == SZ_WP:\n inst.operand[0].size = 16\n inst.branch_dist = 'far'\n # DP: 32bit pointer\n elif inst.operand[0].size == SZ_DP:\n inst.operand[0].size = 32\n inst.branch_dist = 'far'\n elif inst.operand[0].size == 8:\n inst.branch_dist = 'near'\n # resolve 3dnow weirdness \n elif inst.operator == '3dnow': \n inst.operator = itab_list[ITAB__3DNOW][u.input.current()].operator\n # SWAPGS is only valid in 64bits mode\n if inst.operator == 'swapgs' and u.dis_mode != 64:\n u.error = 1\n return -1\n return 0\n\ndef decode_a(u, inst, op):\n \"\"\"Decodes operands of the type seg:offset.\"\"\"\n if inst.opr_mode == 16: \n # seg16:off16 \n op.type = 'OP_PTR'\n op.size = 32\n op.lval = Ptr(u.input.read(16), u.input.read(16))\n else:\n # seg16:off32 \n op.type = 'OP_PTR'\n op.size = 48\n op.lval = Ptr(u.input.read(32), u.input.read(16))\n\ndef decode_gpr(u, inst, s, rm):\n \"\"\"Returns decoded General Purpose Register.\"\"\"\n s = resolve_operand_size(u, inst, s)\n \n if s == 64:\n return GPR[64][rm]\n elif s == SZ_DP or s == 32:\n return GPR[32][rm]\n elif s == SZ_WP or s == 16:\n return GPR[16][rm]\n elif s == 8:\n if u.dis_mode == 
64 and inst.pfx.rex:\n if rm >= 4:\n return GPR[8][rm+4]\n return GPR[8][rm]\n else: \n return GPR[8][rm]\n else:\n return None\n\ndef resolve_gpr64(u, inst, gpr_op):\n \"\"\"64bit General Purpose Register-Selection.\"\"\"\n if gpr_op in range(OP_rAXr8, OP_rDIr15) :\n index = (gpr_op - OP_rAXr8) |(REX_B(inst.pfx.rex) << 3) \n else:\n index = gpr_op - OP_rAX\n if inst.opr_mode == 16:\n return GPR[16][index]\n elif u.dis_mode == 32 or not(inst.opr_mode == 32 and REX_W(inst.pfx.rex) == 0):\n return GPR[32][index]\n return GPR[64][index]\n\ndef resolve_gpr32(u, inst, gpr_op):\n \"\"\"32bit General Purpose Register-Selection.\"\"\"\n index = gpr_op - OP_eAX\n if(inst.opr_mode == 16):\n return GPR[16][index]\n return GPR[32][index]\n\ndef resolve_reg(regtype, i):\n \"\"\"Resolves the register type.\"\"\"\n return GPR[regtype][i]\n\ndef decode_imm(u, inst, s, op):\n \"\"\"Decodes Immediate values.\"\"\"\n op.size = resolve_operand_size(u, inst, s)\n op.type = 'OP_IMM'\n op.lval = u.input.read(op.size) \n\ndef decode_modrm(u, inst, op, s, rm_type, opreg, reg_size, reg_type):\n \"\"\"Decodes ModRM Byte.\"\"\"\n u.input.next()\n\n # get mod, r/m and reg fields\n mod = MODRM_MOD(u.input.current())\n rm = (REX_B(inst.pfx.rex) << 3) | MODRM_RM(u.input.current())\n reg = (REX_R(inst.pfx.rex) << 3) | MODRM_REG(u.input.current())\n\n op.size = resolve_operand_size(u, inst, s)\n\n # if mod is 11b, then the m specifies a gpr/mmx/sse/control/debug \n if mod == 3:\n op.type = 'OP_REG'\n if rm_type == 'T_GPR':\n op.base = decode_gpr(u, inst, op.size, rm)\n else: \n op.base = resolve_reg(rm_type, (REX_B(inst.pfx.rex) << 3) |(rm&7))\n\n # else its memory addressing \n else: \n op.type = 'OP_MEM'\n op.seg = inst.pfx.seg\n # 64bit addressing \n if inst.adr_mode == 64:\n\n op.base = GPR[64][rm]\n\n # get offset type\n if mod == 1:\n op.offset = 8\n elif mod == 2:\n op.offset = 32\n elif mod == 0 and(rm & 7) == 5: \n op.base = 'rip'\n op.offset = 32\n else:\n op.offset = 0\n\n # Scale-Index-Base(SIB)\n if rm & 7 == 4:\n u.input.next()\n \n op.scale = (1 << SIB_S(u.input.current())) & ~1\n op.index = GPR[64][(SIB_I(u.input.current()) |(REX_X(inst.pfx.rex) << 3))]\n op.base = GPR[64][(SIB_B(u.input.current()) |(REX_B(inst.pfx.rex) << 3))]\n\n # special conditions for base reference\n if op.index == 'rsp':\n op.index = None\n op.scale = 0\n\n if op.base == 'rbp' or op.base == 'r13':\n if mod == 0: \n op.base = None\n if mod == 1:\n op.offset = 8\n else:\n op.offset = 32\n\n # 32-Bit addressing mode \n elif inst.adr_mode == 32:\n\n # get base \n op.base = GPR[16][rm]\n\n # get offset type \n if mod == 1:\n op.offset = 8\n elif mod == 2:\n op.offset = 32\n elif mod == 0 and rm == 5:\n op.base = None\n op.offset = 32\n else:\n op.offset = 0\n\n # Scale-Index-Base(SIB)\n if(rm & 7) == 4:\n u.input.next()\n\n op.scale = (1 << SIB_S(u.input.current())) & ~1\n op.index = GPR[32][SIB_I(u.input.current()) |(REX_X(inst.pfx.rex) << 3)]\n op.base = GPR[32][SIB_B(u.input.current()) |(REX_B(inst.pfx.rex) << 3)]\n\n if op.index == 'esp':\n op.index = None\n op.scale = 0\n\n # special condition for base reference \n if op.base == 'ebp':\n if mod == 0:\n op.base = None\n if mod == 1:\n op.offset = 8\n else:\n op.offset = 32\n\n # 16bit addressing mode \n else:\n if rm == 0: \n op.base = 'bx'\n op.index = 'si'\n elif rm == 1: \n op.base = 'bx'\n op.index = 'di'\n elif rm == 2: \n op.base = 'bp'\n op.index = 'si'\n elif rm == 3: \n op.base = 'bp'\n op.index = 'di'\n elif rm == 4: \n op.base = 'si'\n elif rm == 5: \n op.base = 'di'\n elif 
rm == 6: \n op.base = 'bp'\n elif rm == 7: \n op.base = 'bx'\n \n if mod == 0 and rm == 6:\n op.offset = 16\n op.base = None\n elif mod == 1:\n op.offset = 8\n elif mod == 2: \n op.offset = 16\n\n # extract offset, if any \n if op.offset in [8, 16, 32, 64]: \n op.lval = u.input.read(op.offset)\n bound = pow(2, op.offset - 1)\n if op.lval > bound:\n op.lval = -(((2 * bound) - op.lval) % bound)\n\n # resolve register encoded in reg field\n if opreg:\n opreg.type = 'OP_REG'\n opreg.size = resolve_operand_size(u, inst, reg_size)\n if reg_type == 'T_GPR': \n opreg.base = decode_gpr(u, inst, opreg.size, reg)\n else:\n opreg.base = resolve_reg(reg_type, reg)\n\ndef decode_o(u, inst, s, op):\n \"\"\"Decodes offset.\"\"\"\n op.seg = inst.pfx.seg\n op.offset = inst.adr_mode \n op.lval = u.input.read(inst.adr_mode) \n op.type = 'OP_MEM'\n op.size = resolve_operand_size(u, inst, s)\n\ndef disasm_operands(u, inst):\n \"\"\"Disassembles Operands.\"\"\"\n # get type\n def get_mopt(x): return x.type\n mopt = map(get_mopt, inst.itab_entry.operand)\n # get size\n def get_mops(x): return x.size\n mops = map(get_mops, inst.itab_entry.operand)\n\n if mopt[2] != OP_NONE:\n inst.operand = [Operand(), Operand(), Operand()]\n elif mopt[1] != OP_NONE:\n inst.operand = [Operand(), Operand()] \n elif mopt[0] != OP_NONE:\n inst.operand = [Operand()]\n \n # iop = instruction operand \n #iop = inst.operand\n \n if mopt[0] == OP_A:\n decode_a(u, inst, inst.operand[0]) \n # M[b] ... \n # E, G/P/V/I/CL/1/S \n elif mopt[0] == OP_M or mopt[0] == OP_E:\n if mopt[0] == OP_M and MODRM_MOD(u.input.peek()) == 3:\n u.error = 1\n if mopt[1] == OP_G:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_GPR')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n elif mopt[2] == OP_CL:\n inst.operand[2].type = 'OP_REG'\n inst.operand[2].base = 'cl'\n inst.operand[2].size = 8\n elif mopt[1] == OP_P:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_MMX')\n elif mopt[1] == OP_V:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_XMM')\n elif mopt[1] == OP_S:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_SEG')\n else:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', NULL, 0, 'T_NONE')\n if mopt[1] == OP_CL:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = 'cl'\n inst.operand[1].size = 8\n elif mopt[1] == OP_I1:\n inst.operand[1].type = 'OP_IMM'\n inst.operand[1].lval = 1\n elif mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n\n # G, E/PR[,I]/VR \n elif mopt[0] == OP_G:\n if mopt[1] == OP_M:\n if MODRM_MOD(u.input.peek()) == 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_GPR')\n elif mopt[1] == OP_E:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_GPR')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n elif mopt[1] == OP_PR:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_GPR')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n elif mopt[1] == OP_VR:\n if MODRM_MOD(u.input.peek()) != 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_GPR')\n elif mopt[1] == OP_W:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_GPR')\n\n # AL..BH, I/O/DX \n elif mopt[0] in [OP_AL, 
OP_CL, OP_DL, OP_BL,\n OP_AH, OP_CH, OP_DH, OP_BH]:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = GPR[8][mopt[0] - OP_AL]\n inst.operand[0].size = 8\n\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n elif mopt[1] == OP_DX:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = 'dx'\n inst.operand[1].size = 16\n elif mopt[1] == OP_O:\n decode_o(u, inst, mops[1], inst.operand[1])\n\n # rAX[r8]..rDI[r15], I/rAX..rDI/O\n elif mopt[0] in [OP_rAXr8, OP_rCXr9, OP_rDXr10, OP_rBXr11,\n OP_rSPr12, OP_rBPr13, OP_rSIr14, OP_rDIr15,\n OP_rAX, OP_rCX, OP_rDX, OP_rBX,\n OP_rSP, OP_rBP, OP_rSI, OP_rDI]:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = resolve_gpr64(u, inst, mopt[0])\n\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n elif mopt[1] in [OP_rAX, OP_rCX, OP_rDX, OP_rBX,\n OP_rSP, OP_rBP, OP_rSI, OP_rDI]:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = resolve_gpr64(u, inst, mopt[1])\n elif mopt[1] == OP_O:\n decode_o(u, inst, mops[1], inst.operand[1]) \n inst.operand[0].size = resolve_operand_size(u, inst, mops[1])\n\n elif mopt[0] in [OP_ALr8b, OP_CLr9b, OP_DLr10b, OP_BLr11b,\n OP_AHr12b, OP_CHr13b, OP_DHr14b, OP_BHr15b]:\n gpr = (mopt[0] - OP_ALr8b +(REX_B(inst.pfx.rex) << 3))\n if gpr in ['ah',\t'ch',\t'dh',\t'bh',\n 'spl',\t'bpl',\t'sil',\t'dil',\n 'r8b',\t'r9b',\t'r10b',\t'r11b',\n 'r12b',\t'r13b',\t'r14b',\t'r15b',\n ] and inst.pfx.rex: \n gpr = gpr + 4\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = GPR[8][gpr]\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n\n # eAX..eDX, DX/I \n elif mopt[0] in [OP_eAX, OP_eCX, OP_eDX, OP_eBX,\n OP_eSP, OP_eBP, OP_eSI, OP_eDI]:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = resolve_gpr32(u, inst, mopt[0])\n if mopt[1] == OP_DX:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = 'dx'\n inst.operand[1].size = 16\n elif mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n\n # ES..GS \n elif mopt[0] in [OP_ES, OP_CS, OP_DS,\n OP_SS, OP_FS, OP_GS]:\n\n # in 64bits mode, only fs and gs are allowed \n if u.dis_mode == 64:\n if mopt[0] != OP_FS and mopt[0] != OP_GS:\n u.error = 1\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = GPR['T_SEG'][mopt[0] - OP_ES]\n inst.operand[0].size = 16\n\n # J \n elif mopt[0] == OP_J:\n decode_imm(u, inst, mops[0], inst.operand[0])\n # MK take care of signs\n bound = pow(2, inst.operand[0].size - 1)\n if inst.operand[0].lval > bound:\n inst.operand[0].lval = -(((2 * bound) - inst.operand[0].lval) % bound)\n inst.operand[0].type = 'OP_JIMM'\n\n # PR, I \n elif mopt[0] == OP_PR:\n if MODRM_MOD(u.input.peek()) != 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_MMX', NULL, 0, 'T_NONE')\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n\n # VR, I \n elif mopt[0] == OP_VR:\n if MODRM_MOD(u.input.peek()) != 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_XMM', NULL, 0, 'T_NONE')\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n\n # P, Q[,I]/W/E[,I],VR \n elif mopt[0] == OP_P:\n if mopt[1] == OP_Q:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_MMX')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n elif mopt[1] == OP_W:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_MMX')\n elif mopt[1] == OP_VR:\n if MODRM_MOD(u.input.peek()) != 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[1], 
mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_MMX')\n elif mopt[1] == OP_E:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_MMX')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n\n # R, C/D \n elif mopt[0] == OP_R:\n if mopt[1] == OP_C:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_CRG')\n elif mopt[1] == OP_D:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_GPR', inst.operand[1], mops[1], 'T_DBG')\n\n # C, R \n elif mopt[0] == OP_C:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_CRG')\n\n # D, R \n elif mopt[0] == OP_D:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_DBG')\n\n # Q, P \n elif mopt[0] == OP_Q:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_MMX', inst.operand[1], mops[1], 'T_MMX')\n\n # S, E \n elif mopt[0] == OP_S:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_SEG')\n\n # W, V \n elif mopt[0] == OP_W:\n decode_modrm(u, inst, inst.operand[0], mops[0], 'T_XMM', inst.operand[1], mops[1], 'T_XMM')\n\n # V, W[,I]/Q/M/E \n elif mopt[0] == OP_V:\n if mopt[1] == OP_W:\n # special cases for movlps and movhps \n if MODRM_MOD(u.input.peek()) == 3:\n if inst.operator == 'movlps':\n inst.operator = 'movhlps'\n elif inst.operator == 'movhps':\n inst.operator = 'movlhps'\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_XMM', inst.operand[0], mops[0], 'T_XMM')\n if mopt[2] == OP_I:\n decode_imm(u, inst, mops[2], inst.operand[2])\n elif mopt[1] == OP_Q:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_XMM')\n elif mopt[1] == OP_M:\n if MODRM_MOD(u.input.peek()) == 3:\n u.error = 1\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_XMM')\n elif mopt[1] == OP_E:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_GPR', inst.operand[0], mops[0], 'T_XMM')\n elif mopt[1] == OP_PR:\n decode_modrm(u, inst, inst.operand[1], mops[1], 'T_MMX', inst.operand[0], mops[0], 'T_XMM')\n\n # DX, eAX/AL\n elif mopt[0] == OP_DX:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = 'dx'\n inst.operand[0].size = 16\n\n if mopt[1] == OP_eAX:\n inst.operand[1].type = 'OP_REG' \n inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])\n elif mopt[1] == OP_AL:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = 'al'\n inst.operand[1].size = 8\n\n # I, I/AL/eAX\n elif mopt[0] == OP_I:\n decode_imm(u, inst, mops[0], inst.operand[0])\n if mopt[1] == OP_I:\n decode_imm(u, inst, mops[1], inst.operand[1])\n elif mopt[1] == OP_AL:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = 'al'\n inst.operand[1].size = 16\n elif mopt[1] == OP_eAX:\n inst.operand[1].type = 'OP_REG' \n inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])\n\n # O, AL/eAX\n elif mopt[0] == OP_O:\n decode_o(u, inst, mops[0], inst.operand[0])\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].size = resolve_operand_size(u, inst, mops[0])\n if mopt[1] == OP_AL:\n inst.operand[1].base = 'al' \n elif mopt[1] == OP_eAX:\n inst.operand[1].base = resolve_gpr32(u, inst, mopt[1])\n elif mopt[1] == OP_rAX:\n inst.operand[1].base = resolve_gpr64(u, inst, mopt[1]) \n\n # 3\n elif mopt[0] == OP_I3:\n inst.operand[0].type = 'OP_IMM'\n inst.operand[0].lval = 3\n\n # ST(n), ST(n) \n elif mopt[0] in [OP_ST0, OP_ST1, OP_ST2, OP_ST3,\n OP_ST4, OP_ST5, OP_ST6, OP_ST7]:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = 
GPR['T_ST'][mopt[0] - OP_ST0]\n inst.operand[0].size = 0\n\n if mopt[1] in [OP_ST0, OP_ST1, OP_ST2, OP_ST3,\n OP_ST4, OP_ST5, OP_ST6, OP_ST7]:\n inst.operand[1].type = 'OP_REG'\n inst.operand[1].base = GPR['T_ST'][mopt[1] - OP_ST0]\n inst.operand[1].size = 0\n\n # AX \n elif mopt[0] == OP_AX:\n inst.operand[0].type = 'OP_REG'\n inst.operand[0].base = 'ax'\n inst.operand[0].size = 16\n\n # none \n else:\n for op in inst.operand:\n op.type = None\n\n return 0\n\n\ndef do_mode(u, inst):\n # if in error state, bail out \n if u.error:\n return -1 \n\n # propagate prefix effects \n if u.dis_mode == 64: # set 64bit-mode flags\n # Check validity of the instruction in 64-bit mode \n if P_INV64(inst.itab_entry.prefix):\n u.error = 1\n return -1\n\n # effective rex prefix is the effective mask for the \n # instruction hard-coded in the opcode map.\n inst.pfx.rex = ((inst.pfx.rex & 0x40) \n |(inst.pfx.rex & REX_PFX_MASK(inst.itab_entry.prefix)))\n\n # calculate effective operand size \n if REX_W(inst.pfx.rex) or P_DEF64(inst.itab_entry.prefix):\n inst.opr_mode = 64\n elif inst.pfx.opr:\n inst.opr_mode = 16\n else:\n inst.opr_mode = 32\n\n # calculate effective address size\n if inst.pfx.adr:\n inst.adr_mode = 32 \n else:\n inst.adr_mode = 64\n elif u.dis_mode == 32: # set 32bit-mode flags \n if inst.pfx.opr:\n inst.opr_mode = 16 \n else:\n inst.opr_mode = 32\n if inst.pfx.adr:\n inst.adr_mode = 16 \n else: \n inst.adr_mode = 32\n elif u.dis_mode == 16: # set 16bit-mode flags \n if inst.pfx.opr:\n inst.opr_mode = 32 \n else: \n inst.opr_mode = 16\n if inst.pfx.adr:\n inst.adr_mode = 32 \n else: \n inst.adr_mode = 16\n # These flags determine which operand to apply the operand size\n # cast to.\n cast = [P_C0, P_C1, P_C2]\n for i in range(len(inst.operand)):\n inst.operand[i].cast = cast[i](inst.itab_entry.prefix)\n\n return 0\n\ndef decode(self):\n \"\"\"Instruction decoder. 
Returns the number of bytes decoded.\"\"\"\n inst = Inst(myInput = self.input, add = self.pc, mode = self.dis_mode, syntax = self.syntax)\n self.error = 0\n self.input.start ()\n if get_prefixes(self, inst) != 0:\n pass # ;print('prefixes error') # error \n elif search_itab(self, inst) != 0:\n pass #; print('itab error') # error \n elif do_mode(self, inst) != 0:\n pass #; print('mode error') # error \n elif disasm_operands(self, inst) != 0:\n pass #; print('operand error') # error \n elif resolve_operator(self, inst) != 0:\n pass #; print('operator error') # error \n # Handle decode error.\n if self.error:\n inst.clear()\n inst.size = self.input.ctr + 1\n inst.raw = self.input.buffer[0:inst.size]\n inst.set_pc(inst.add + inst.size)\n return inst\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2778,"cells":{"repo_name":{"kind":"string","value":"jemofthewest/mykoans"},"path":{"kind":"string","value":"python2/libs/colorama/win32.py"},"copies":{"kind":"string","value":"86"},"size":{"kind":"string","value":"2730"},"content":{"kind":"string","value":"\n# from winbase.h\nSTDOUT = -11\nSTDERR = -12\n\ntry:\n from ctypes import windll\nexcept ImportError:\n windll = None\n SetConsoleTextAttribute = lambda *_: None\nelse:\n from ctypes import (\n byref, Structure, c_char, c_short, c_uint32, c_ushort\n )\n\n handles = {\n STDOUT: windll.kernel32.GetStdHandle(STDOUT),\n STDERR: windll.kernel32.GetStdHandle(STDERR),\n }\n\n SHORT = c_short\n WORD = c_ushort\n DWORD = c_uint32\n TCHAR = c_char\n\n class COORD(Structure):\n \"\"\"struct in wincon.h\"\"\"\n _fields_ = [\n ('X', SHORT),\n ('Y', SHORT),\n ]\n\n class SMALL_RECT(Structure):\n \"\"\"struct in wincon.h.\"\"\"\n _fields_ = [\n (\"Left\", SHORT),\n (\"Top\", SHORT),\n (\"Right\", SHORT),\n (\"Bottom\", SHORT),\n ]\n\n class CONSOLE_SCREEN_BUFFER_INFO(Structure):\n \"\"\"struct in wincon.h.\"\"\"\n _fields_ = [\n (\"dwSize\", COORD),\n (\"dwCursorPosition\", COORD),\n (\"wAttributes\", WORD),\n (\"srWindow\", SMALL_RECT),\n (\"dwMaximumWindowSize\", COORD),\n ]\n\n def GetConsoleScreenBufferInfo(stream_id):\n handle = handles[stream_id]\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = windll.kernel32.GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n # This fails when imported via setup.py when installing using 'pip'\n # presumably the fix is that running setup.py should not trigger all\n # this activity.\n # assert success\n return csbi\n\n def SetConsoleTextAttribute(stream_id, attrs):\n handle = handles[stream_id]\n success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)\n assert success\n\n def SetConsoleCursorPosition(stream_id, position):\n handle = handles[stream_id]\n position = COORD(*position)\n success = windll.kernel32.SetConsoleCursorPosition(handle, position)\n assert success\n\n def FillConsoleOutputCharacter(stream_id, char, length, start):\n handle = handles[stream_id]\n char = TCHAR(char)\n length = DWORD(length)\n start = COORD(*start)\n num_written = DWORD(0)\n # AttributeError: function 'FillConsoleOutputCharacter' not found\n # could it just be that my types are wrong?\n success = windll.kernel32.FillConsoleOutputCharacter(\n handle, char, length, start, byref(num_written))\n assert success\n return num_written.value\n\n\nif __name__=='__main__':\n x = GetConsoleScreenBufferInfo(STDOUT)\n print(x.dwSize)\n print(x.dwCursorPosition)\n print(x.wAttributes)\n print(x.srWindow)\n 
print(x.dwMaximumWindowSize)\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2779,"cells":{"repo_name":{"kind":"string","value":"ztrautt/tutorials"},"path":{"kind":"string","value":"TEM-image-simple/mdcs/explore.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"1530"},"content":{"kind":"string","value":"#! /usr/bin/env python\nimport requests\nfrom collections import OrderedDict\n\ndef select_all(host,user,pswd,cert=None,format=None):\n url = host + \"/rest/explore/select/all\"\n params = dict()\n if format: params['dataformat'] = format\n r = requests.get(url, params=params, auth=(user, pswd), verify=cert)\n return r.json(object_pairs_hook=OrderedDict)\n\ndef select(host,user,pswd,cert=None,format=None,ID=None,template=None,title=None):\n url = host + \"/rest/explore/select\"\n params = dict()\n if format: params['dataformat'] = format\n if ID: params['id'] = ID\n if template: params['schema'] = template\n if title: params['title'] = title\n r = requests.get(url, params=params, auth=(user, pswd), verify=cert)\n return r.json(object_pairs_hook=OrderedDict)\n \ndef delete(ID,host,user,pswd,cert=None):\n url = host + \"/rest/explore/delete\"\n params = dict()\n params['id']=ID\n r = requests.delete(url, params=params, auth=(user, pswd), verify=cert)\n if int(r.status_code)==204:\n return \"Successful deletion of: \"+ID\n else:\n return r.json()\n \ndef query(host,user,pswd,cert=None,format=None,query=None,repositories=None):\n url = host + \"/rest/explore/query-by-example\"\n data = dict()\n if format: data['dataformat'] = format\n if query: data['query'] = query\n if repositories: data['repositories'] = repositories\n r = requests.post(url, data=data, auth=(user, pswd), verify=cert)\n return r.json(object_pairs_hook=OrderedDict)"},"license":{"kind":"string","value":"cc0-1.0"}}},{"rowIdx":2780,"cells":{"repo_name":{"kind":"string","value":"HBehrens/feedsanitizer"},"path":{"kind":"string","value":"django/contrib/gis/utils/wkt.py"},"copies":{"kind":"string","value":"419"},"size":{"kind":"string","value":"1846"},"content":{"kind":"string","value":"\"\"\"\n Utilities for manipulating Geometry WKT.\n\"\"\"\n\ndef precision_wkt(geom, prec):\n \"\"\"\n Returns WKT text of the geometry according to the given precision (an \n integer or a string). 
If the precision is an integer, then the decimal\n places of the coordinates in the WKT will be truncated to that number:\n\n >>> pnt = Point(5, 23)\n >>> pnt.wkt\n 'POINT (5.0000000000000000 23.0000000000000000)'\n >>> precision_wkt(pnt, 1)\n 'POINT (5.0 23.0)'\n\n If the precision is a string, it must be a valid Python format string \n (e.g., '%20.7f') -- thus, you should know what you're doing.\n \"\"\"\n if isinstance(prec, int):\n num_fmt = '%%.%df' % prec\n elif isinstance(prec, basestring):\n num_fmt = prec\n else:\n raise TypeError\n\n # TODO: Support 3D geometries.\n coord_fmt = ' '.join([num_fmt, num_fmt])\n\n def formatted_coords(coords):\n return ','.join([coord_fmt % c[:2] for c in coords])\n\n def formatted_poly(poly):\n return ','.join(['(%s)' % formatted_coords(r) for r in poly])\n\n def formatted_geom(g):\n gtype = str(g.geom_type).upper()\n yield '%s(' % gtype\n if gtype == 'POINT':\n yield formatted_coords((g.coords,))\n elif gtype in ('LINESTRING', 'LINEARRING'):\n yield formatted_coords(g.coords)\n elif gtype in ('POLYGON', 'MULTILINESTRING'):\n yield formatted_poly(g)\n elif gtype == 'MULTIPOINT':\n yield formatted_coords(g.coords)\n elif gtype == 'MULTIPOLYGON':\n yield ','.join(['(%s)' % formatted_poly(p) for p in g])\n elif gtype == 'GEOMETRYCOLLECTION':\n yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])\n else:\n raise TypeError\n yield ')'\n\n return ''.join([wkt for wkt in formatted_geom(geom)])\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2781,"cells":{"repo_name":{"kind":"string","value":"manumathewthomas/Chat-with-Joey"},"path":{"kind":"string","value":"chatbot/chatbot.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"31681"},"content":{"kind":"string","value":"# Copyright 2015 Conchylicultor. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nMain script. 
See README.md for more information\n\nUse python 3\n\"\"\"\n\nimport argparse # Command line parsing\nimport configparser # Saving the models parameters\nimport datetime # Chronometer\nimport os # Files management\nimport tensorflow as tf\nimport numpy as np\nimport math\n\nfrom tqdm import tqdm # Progress bar\nfrom tensorflow.python import debug as tf_debug\n\nfrom chatbot.textdata import TextData\nfrom chatbot.model import Model\n\n\nclass Chatbot:\n \"\"\"\n Main class which launch the training or testing mode\n \"\"\"\n\n class TestMode:\n \"\"\" Simple structure representing the different testing modes\n \"\"\"\n ALL = 'all'\n INTERACTIVE = 'interactive' # The user can write his own questions\n DAEMON = 'daemon' # The chatbot runs on background and can regularly be called to predict something\n\n def __init__(self):\n \"\"\"\n \"\"\"\n # Model/dataset parameters\n self.args = None\n\n # Task specific object\n self.textData = None # Dataset\n self.model = None # Sequence to sequence model\n\n # Tensorflow utilities for convenience saving/logging\n self.writer = None\n self.saver = None\n self.modelDir = '' # Where the model is saved\n self.globStep = 0 # Represent the number of iteration for the current model\n\n # TensorFlow main session (we keep track for the daemon)\n self.sess = None\n\n # Filename and directories constants\n self.MODEL_DIR_BASE = 'save/model'\n self.MODEL_NAME_BASE = 'model'\n self.MODEL_EXT = '.ckpt'\n self.CONFIG_FILENAME = 'params.ini'\n self.CONFIG_VERSION = '0.4'\n self.TEST_IN_NAME = 'data/test/samples.txt'\n self.TEST_OUT_SUFFIX = '_predictions.txt'\n self.SENTENCES_PREFIX = ['Q: ', 'A: ']\n\n @staticmethod\n def parseArgs(args):\n \"\"\"\n Parse the arguments from the given command line\n Args:\n args (list): List of arguments to parse. If None, the default sys.argv will be parsed\n \"\"\"\n\n parser = argparse.ArgumentParser()\n\n # Global options\n globalArgs = parser.add_argument_group('Global options')\n globalArgs.add_argument('--test',\n nargs='?',\n choices=[Chatbot.TestMode.ALL, Chatbot.TestMode.INTERACTIVE, Chatbot.TestMode.DAEMON],\n const=Chatbot.TestMode.ALL, default=None,\n help='if present, launch the program try to answer all sentences from data/test/ with'\n ' the defined model(s), in interactive mode, the user can wrote his own sentences,'\n ' use daemon mode to integrate the chatbot in another program')\n globalArgs.add_argument('--createDataset', action='store_true', help='if present, the program will only generate the dataset from the corpus (no training/testing)')\n globalArgs.add_argument('--playDataset', type=int, nargs='?', const=10, default=None, help='if set, the program will randomly play some samples(can be use conjointly with createDataset if this is the only action you want to perform)')\n globalArgs.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present on the model directory (Warning: the model will be destroyed with all the folder content)')\n globalArgs.add_argument('--verbose', action='store_true', help='When testing, will plot the outputs at the same time they are computed')\n globalArgs.add_argument('--debug', action='store_true', help='run DeepQA with Tensorflow debug mode. 
Read TF documentation for more details on this.')\n globalArgs.add_argument('--keepAll', action='store_true', help='If this option is set, all saved model will be kept (Warning: make sure you have enough free disk space or increase saveEvery)') # TODO: Add an option to delimit the max size\n globalArgs.add_argument('--modelTag', type=str, default=None, help='tag to differentiate which model to store/load')\n globalArgs.add_argument('--rootDir', type=str, default=None, help='folder where to look for the models and data')\n globalArgs.add_argument('--watsonMode', action='store_true', help='Inverse the questions and answer when training (the network try to guess the question)')\n globalArgs.add_argument('--autoEncode', action='store_true', help='Randomly pick the question or the answer and use it both as input and output')\n globalArgs.add_argument('--device', type=str, default=None, help='\\'gpu\\' or \\'cpu\\' (Warning: make sure you have enough free RAM), allow to choose on which hardware run the model')\n globalArgs.add_argument('--seed', type=int, default=None, help='random seed for replication')\n\n # Dataset options\n datasetArgs = parser.add_argument_group('Dataset options')\n datasetArgs.add_argument('--corpus', choices=TextData.corpusChoices(), default=TextData.corpusChoices()[0], help='corpus on which extract the dataset.')\n datasetArgs.add_argument('--datasetTag', type=str, default='', help='add a tag to the dataset (file where to load the vocabulary and the precomputed samples, not the original corpus). Useful to manage multiple versions. Also used to define the file used for the lightweight format.') # The samples are computed from the corpus if it does not exist already. There are saved in \\'data/samples/\\'\n datasetArgs.add_argument('--ratioDataset', type=float, default=1.0, help='ratio of dataset used to avoid using the whole dataset') # Not implemented, useless ?\n datasetArgs.add_argument('--maxLength', type=int, default=10, help='maximum length of the sentence (for input and output), define number of maximum step of the RNN')\n datasetArgs.add_argument('--lightweightFile', type=str, default=None, help='file containing our lightweight-formatted corpus')\n\n # Network options (Warning: if modifying something here, also make the change on save/loadParams() )\n nnArgs = parser.add_argument_group('Network options', 'architecture related option')\n nnArgs.add_argument('--hiddenSize', type=int, default=256, help='number of hidden units in each RNN cell')\n nnArgs.add_argument('--numLayers', type=int, default=2, help='number of rnn layers')\n nnArgs.add_argument('--embeddingSize', type=int, default=32, help='embedding size of the word representation')\n nnArgs.add_argument('--initEmbeddings', action='store_true', help='if present, the program will initialize the embeddings with pre-trained word2vec vectors')\n nnArgs.add_argument('--softmaxSamples', type=int, default=0, help='Number of samples in the sampled softmax loss function. 
A value of 0 deactivates sampled softmax')\n\n # Training options\n trainingArgs = parser.add_argument_group('Training options')\n trainingArgs.add_argument('--numEpochs', type=int, default=30, help='maximum number of epochs to run')\n trainingArgs.add_argument('--saveEvery', type=int, default=1000, help='nb of mini-batch step before creating a model checkpoint')\n trainingArgs.add_argument('--batchSize', type=int, default=10, help='mini-batch size')\n trainingArgs.add_argument('--learningRate', type=float, default=0.001, help='Learning rate')\n\n return parser.parse_args(args)\n\n def main(self, args=None):\n \"\"\"\n Launch the training and/or the interactive mode\n \"\"\"\n print('Welcome to DeepQA v0.1 !')\n print()\n print('TensorFlow detected: v{}'.format(tf.__version__))\n\n # General initialisation\n\n self.args = self.parseArgs(args)\n\n if not self.args.rootDir:\n self.args.rootDir = os.getcwd() # Use the current working directory\n\n #tf.logging.set_verbosity(tf.logging.INFO) # DEBUG, INFO, WARN (default), ERROR, or FATAL\n\n self.loadModelParams() # Update the self.modelDir and self.globStep, for now, not used when loading Model (but need to be called before _getSummaryName)\n\n self.textData = TextData(self.args)\n # TODO: Add a mode where we can force the input of the decoder // Try to visualize the predictions for\n # each word of the vocabulary / decoder input\n # TODO: For now, the model are trained for a specific dataset (because of the maxLength which define the\n # vocabulary). Add a compatibility mode which allow to launch a model trained on a different vocabulary (\n # remap the word2id/id2word variables).\n if self.args.createDataset:\n print('Dataset created! Thanks for using this program')\n return # No need to go further\n\n # Prepare the model\n with tf.device(self.getDevice()):\n self.model = Model(self.args, self.textData)\n\n # Saver/summaries\n self.writer = tf.summary.FileWriter(self._getSummaryName())\n self.saver = tf.train.Saver(max_to_keep=200, write_version=tf.train.SaverDef.V1) # TODO: See GitHub for format name issue (when restoring the model)\n\n # TODO: Fixed seed (WARNING: If dataset shuffling, make sure to do that after saving the\n # dataset, otherwise, all which cames after the shuffling won't be replicable when\n # reloading the dataset). 
How to restore the seed after loading ??\n # Also fix seed for random.shuffle (does it works globally for all files ?)\n\n # Running session\n self.sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True, # Allows backup device for non GPU-available operations (when forcing GPU)\n log_device_placement=False) # Too verbose ?\n ) # TODO: Replace all sess by self.sess (not necessary a good idea) ?\n\n if self.args.debug:\n self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)\n self.sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n\n print('Initialize variables...')\n self.sess.run(tf.global_variables_initializer())\n\n # Reload the model eventually (if it exist.), on testing mode, the models are not loaded here (but in predictTestset)\n if self.args.test != Chatbot.TestMode.ALL:\n self.managePreviousModel(self.sess)\n\n # Initialize embeddings with pre-trained word2vec vectors\n if self.args.initEmbeddings:\n print(\"Loading pre-trained embeddings from GoogleNews-vectors-negative300.bin\")\n self.loadEmbedding(self.sess)\n\n if self.args.test:\n if self.args.test == Chatbot.TestMode.INTERACTIVE:\n self.mainTestInteractive(self.sess)\n elif self.args.test == Chatbot.TestMode.ALL:\n print('Start predicting...')\n self.predictTestset(self.sess)\n print('All predictions done')\n elif self.args.test == Chatbot.TestMode.DAEMON:\n print('Daemon mode, running in background...')\n else:\n raise RuntimeError('Unknown test mode: {}'.format(self.args.test)) # Should never happen\n else:\n self.mainTrain(self.sess)\n\n if self.args.test != Chatbot.TestMode.DAEMON:\n self.sess.close()\n print(\"The End! Thanks for using this program\")\n\n def mainTrain(self, sess):\n \"\"\" Training loop\n Args:\n sess: The current running session\n \"\"\"\n\n # Specific training dependent loading\n\n self.textData.makeLighter(self.args.ratioDataset) # Limit the number of training samples\n\n mergedSummaries = tf.summary.merge_all() # Define the summary operator (Warning: Won't appear on the tensorboard graph)\n if self.globStep == 0: # Not restoring from previous run\n self.writer.add_graph(sess.graph) # First time only\n\n # If restoring a model, restore the progression bar ? 
and current batch ?\n\n print('Start training (press Ctrl+C to save and exit)...')\n\n try: # If the user exit while training, we still try to save the model\n for e in range(self.args.numEpochs):\n\n print()\n print(\"----- Epoch {}/{} ; (lr={}) -----\".format(e+1, self.args.numEpochs, self.args.learningRate))\n\n batches = self.textData.getBatches()\n\n # TODO: Also update learning parameters eventually\n\n tic = datetime.datetime.now()\n for nextBatch in tqdm(batches, desc=\"Training\"):\n # Training pass\n ops, feedDict = self.model.step(nextBatch)\n assert len(ops) == 2 # training, loss\n _, loss, summary = sess.run(ops + (mergedSummaries,), feedDict)\n self.writer.add_summary(summary, self.globStep)\n self.globStep += 1\n\n # Output training status\n if self.globStep % 100 == 0:\n perplexity = math.exp(float(loss)) if loss < 300 else float(\"inf\")\n tqdm.write(\"----- Step %d -- Loss %.2f -- Perplexity %.2f\" % (self.globStep, loss, perplexity))\n\n # Checkpoint\n if self.globStep % self.args.saveEvery == 0:\n self._saveSession(sess)\n\n toc = datetime.datetime.now()\n\n print(\"Epoch finished in {}\".format(toc-tic)) # Warning: Will overflow if an epoch takes more than 24 hours, and the output isn't really nicer\n except (KeyboardInterrupt, SystemExit): # If the user press Ctrl+C while testing progress\n print('Interruption detected, exiting the program...')\n\n self._saveSession(sess) # Ultimate saving before complete exit\n\n def predictTestset(self, sess):\n \"\"\" Try predicting the sentences from the samples.txt file.\n The sentences are saved on the modelDir under the same name\n Args:\n sess: The current running session\n \"\"\"\n\n # Loading the file to predict\n with open(os.path.join(self.args.rootDir, self.TEST_IN_NAME), 'r') as f:\n lines = f.readlines()\n\n modelList = self._getModelList()\n if not modelList:\n print('Warning: No model found in \\'{}\\'. 
Please train a model before trying to predict'.format(self.modelDir))\n return\n\n # Predicting for each model present in modelDir\n for modelName in sorted(modelList): # TODO: Natural sorting\n print('Restoring previous model from {}'.format(modelName))\n self.saver.restore(sess, modelName)\n print('Testing...')\n\n saveName = modelName[:-len(self.MODEL_EXT)] + self.TEST_OUT_SUFFIX # We remove the model extension and add the prediction suffix\n with open(saveName, 'w') as f:\n nbIgnored = 0\n for line in tqdm(lines, desc='Sentences'):\n question = line[:-1] # Remove the endl character\n\n answer = self.singlePredict(question)\n if not answer:\n nbIgnored += 1\n continue # Back to the beginning, try again\n\n predString = '{x[0]}{0}\\n{x[1]}{1}\\n\\n'.format(question, self.textData.sequence2str(answer, clean=True), x=self.SENTENCES_PREFIX)\n if self.args.verbose:\n tqdm.write(predString)\n f.write(predString)\n print('Prediction finished, {}/{} sentences ignored (too long)'.format(nbIgnored, len(lines)))\n\n def mainTestInteractive(self, sess):\n \"\"\" Try predicting the sentences that the user will enter in the console\n Args:\n sess: The current running session\n \"\"\"\n # TODO: If verbose mode, also show similar sentences from the training set with the same words (include in mainTest also)\n # TODO: Also show the top 10 most likely predictions for each predicted output (when verbose mode)\n # TODO: Log the questions asked for later re-use (merge with test/samples.txt)\n\n print('Testing: Launch interactive mode:')\n print('')\n print('Welcome to the interactive mode, here you can ask Deep Q&A the sentence you want. Don\\'t have high '\n 'expectations. Type \\'exit\\' or just press ENTER to quit the program. Have fun.')\n\n while True:\n question = input(self.SENTENCES_PREFIX[0])\n if question == '' or question == 'exit':\n break\n\n questionSeq = [] # Will contain the question as seen by the encoder\n answer = self.singlePredict(question, questionSeq)\n if not answer:\n print('Warning: sentence too long, sorry. Maybe try a simpler sentence.')\n continue # Back to the beginning, try again\n\n print('{}{}'.format(self.SENTENCES_PREFIX[1], self.textData.sequence2str(answer, clean=True)))\n\n if self.args.verbose:\n print(self.textData.batchSeq2str(questionSeq, clean=True, reverse=True))\n print(self.textData.sequence2str(answer))\n\n print()\n\n def singlePredict(self, question, questionSeq=None):\n \"\"\" Predict the sentence\n Args:\n question (str): the raw input sentence\n questionSeq (List): output argument. 
If given will contain the input batch sequence\n Return:\n list : the word ids corresponding to the answer\n \"\"\"\n # Create the input batch\n batch = self.textData.sentence2enco(question)\n if not batch:\n return None\n if questionSeq is not None: # If the caller want to have the real input\n questionSeq.extend(batch.encoderSeqs)\n\n # Run the model\n ops, feedDict = self.model.step(batch)\n output = self.sess.run(ops[0], feedDict) # TODO: Summarize the output too (histogram, ...)\n answer = self.textData.deco2sentence(output)\n\n return answer\n\n def daemonPredict(self, sentence):\n \"\"\" Return the answer to a given sentence (same as singlePredict() but with additional cleaning)\n Args:\n sentence (str): the raw input sentence\n Return:\n str: the human readable sentence\n \"\"\"\n return self.textData.sequence2str(\n self.singlePredict(sentence),\n clean=True\n )\n\n def daemonClose(self):\n \"\"\" A utility function to close the daemon when finish\n \"\"\"\n print('Exiting the daemon mode...')\n self.sess.close()\n print('Daemon closed.')\n\n def loadEmbedding(self, sess):\n \"\"\" Initialize embeddings with pre-trained word2vec vectors\n Will modify the embedding weights of the current loaded model\n Uses the GoogleNews pre-trained values (path hardcoded)\n \"\"\"\n\n # Fetch embedding variables from model\n with tf.variable_scope(\"embedding_rnn_seq2seq/RNN/EmbeddingWrapper\", reuse=True):\n em_in = tf.get_variable(\"embedding\")\n with tf.variable_scope(\"embedding_rnn_seq2seq/embedding_rnn_decoder\", reuse=True):\n em_out = tf.get_variable(\"embedding\")\n\n # Disable training for embeddings\n variables = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)\n variables.remove(em_in)\n variables.remove(em_out)\n\n # If restoring a model, we can leave here\n if self.globStep != 0:\n return\n\n # New model, we load the pre-trained word2vec data and initialize embeddings\n with open(os.path.join(self.args.rootDir, 'data/word2vec/GoogleNews-vectors-negative300.bin'), \"rb\", 0) as f:\n header = f.readline()\n vocab_size, vector_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * vector_size\n initW = np.random.uniform(-0.25,0.25,(len(self.textData.word2id), vector_size))\n for line in tqdm(range(vocab_size)):\n word = []\n while True:\n ch = f.read(1)\n if ch == b' ':\n word = b''.join(word).decode('utf-8')\n break\n if ch != b'\\n':\n word.append(ch)\n if word in self.textData.word2id:\n initW[self.textData.word2id[word]] = np.fromstring(f.read(binary_len), dtype='float32')\n else:\n f.read(binary_len)\n\n # PCA Decomposition to reduce word2vec dimensionality\n if self.args.embeddingSize < vector_size:\n U, s, Vt = np.linalg.svd(initW, full_matrices=False)\n S = np.zeros((vector_size, vector_size), dtype=complex)\n S[:vector_size, :vector_size] = np.diag(s)\n initW = np.dot(U[:, :self.args.embeddingSize], S[:self.args.embeddingSize, :self.args.embeddingSize])\n\n # Initialize input and output embeddings\n sess.run(em_in.assign(initW))\n sess.run(em_out.assign(initW))\n\n\n def managePreviousModel(self, sess):\n \"\"\" Restore or reset the model, depending of the parameters\n If the destination directory already contains some file, it will handle the conflict as following:\n * If --reset is set, all present files will be removed (warning: no confirmation is asked) and the training\n restart from scratch (globStep & cie reinitialized)\n * Otherwise, it will depend of the directory content. 
If the directory contains:\n * No model files (only summary logs): works as a reset (restart from scratch)\n * Other model files, but modelName not found (surely keepAll option changed): raise error, the user should\n decide by himself what to do\n * The right model file (eventually some other): no problem, simply resume the training\n In any case, the directory will exist as it has been created by the summary writer\n Args:\n sess: The current running session\n \"\"\"\n\n print('WARNING: ', end='')\n\n modelName = self._getModelName()\n\n if os.listdir(self.modelDir):\n if self.args.reset:\n print('Reset: Destroying previous model at {}'.format(self.modelDir))\n # Analysing directory content\n elif os.path.exists(modelName): # Restore the model\n print('Restoring previous model from {}'.format(modelName))\n self.saver.restore(sess, modelName) # Will crash when --reset is not activated and the model has not been saved yet\n elif self._getModelList():\n print('Conflict with previous models.')\n raise RuntimeError('Some models are already present in \\'{}\\'. You should check them first (or re-try with the keepAll flag)'.format(self.modelDir))\n else: # No other model to conflict with (probably summary files)\n print('No previous model found, but some files found at {}. Cleaning...'.format(self.modelDir)) # Warning: No confirmation asked\n self.args.reset = True\n\n if self.args.reset:\n fileList = [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir)]\n for f in fileList:\n print('Removing {}'.format(f))\n os.remove(f)\n\n else:\n print('No previous model found, starting from clean directory: {}'.format(self.modelDir))\n\n def _saveSession(self, sess):\n \"\"\" Save the model parameters and the variables\n Args:\n sess: the current session\n \"\"\"\n tqdm.write('Checkpoint reached: saving model (don\\'t stop the run)...')\n self.saveModelParams()\n self.saver.save(sess, self._getModelName()) # TODO: Put a limit size (ex: 3GB for the modelDir)\n tqdm.write('Model saved.')\n\n def _getModelList(self):\n \"\"\" Return the list of the model files inside the model directory\n \"\"\"\n return [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir) if f.endswith(self.MODEL_EXT)]\n\n def loadModelParams(self):\n \"\"\" Load the some values associated with the current model, like the current globStep value\n For now, this function does not need to be called before loading the model (no parameters restored). However,\n the modelDir name will be initialized here so it is required to call this function before managePreviousModel(),\n _getModelName() or _getSummaryName()\n Warning: if you modify this function, make sure the changes mirror saveModelParams, also check if the parameters\n should be reset in managePreviousModel\n \"\"\"\n # Compute the current model path\n self.modelDir = os.path.join(self.args.rootDir, self.MODEL_DIR_BASE)\n if self.args.modelTag:\n self.modelDir += '-' + self.args.modelTag\n\n # If there is a previous model, restore some parameters\n configName = os.path.join(self.modelDir, self.CONFIG_FILENAME)\n if not self.args.reset and not self.args.createDataset and os.path.exists(configName):\n # Loading\n config = configparser.ConfigParser()\n config.read(configName)\n\n # Check the version\n currentVersion = config['General'].get('version')\n if currentVersion != self.CONFIG_VERSION:\n raise UserWarning('Present configuration version {0} does not match {1}. 
You can try manual changes on \\'{2}\\''.format(currentVersion, self.CONFIG_VERSION, configName))\n\n # Restoring the parameters\n self.globStep = config['General'].getint('globStep')\n self.args.maxLength = config['General'].getint('maxLength') # We need to restore the model length because of the textData associated and the vocabulary size (TODO: Compatibility mode between different maxLength)\n self.args.watsonMode = config['General'].getboolean('watsonMode')\n self.args.autoEncode = config['General'].getboolean('autoEncode')\n self.args.corpus = config['General'].get('corpus')\n self.args.datasetTag = config['General'].get('datasetTag', '')\n\n self.args.hiddenSize = config['Network'].getint('hiddenSize')\n self.args.numLayers = config['Network'].getint('numLayers')\n self.args.embeddingSize = config['Network'].getint('embeddingSize')\n self.args.initEmbeddings = config['Network'].getboolean('initEmbeddings')\n self.args.softmaxSamples = config['Network'].getint('softmaxSamples')\n\n # No restoring for training params, batch size or other non model dependent parameters\n\n # Show the restored params\n print()\n print('Warning: Restoring parameters:')\n print('globStep: {}'.format(self.globStep))\n print('maxLength: {}'.format(self.args.maxLength))\n print('watsonMode: {}'.format(self.args.watsonMode))\n print('autoEncode: {}'.format(self.args.autoEncode))\n print('corpus: {}'.format(self.args.corpus))\n print('datasetTag: {}'.format(self.args.datasetTag))\n print('hiddenSize: {}'.format(self.args.hiddenSize))\n print('numLayers: {}'.format(self.args.numLayers))\n print('embeddingSize: {}'.format(self.args.embeddingSize))\n print('initEmbeddings: {}'.format(self.args.initEmbeddings))\n print('softmaxSamples: {}'.format(self.args.softmaxSamples))\n print()\n\n # For now, not arbitrary independent maxLength between encoder and decoder\n self.args.maxLengthEnco = self.args.maxLength\n self.args.maxLengthDeco = self.args.maxLength + 2\n\n if self.args.watsonMode:\n self.SENTENCES_PREFIX.reverse()\n\n\n def saveModelParams(self):\n \"\"\" Save the params of the model, like the current globStep value\n Warning: if you modify this function, make sure the changes mirror loadModelParams\n \"\"\"\n config = configparser.ConfigParser()\n config['General'] = {}\n config['General']['version'] = self.CONFIG_VERSION\n config['General']['globStep'] = str(self.globStep)\n config['General']['maxLength'] = str(self.args.maxLength)\n config['General']['watsonMode'] = str(self.args.watsonMode)\n config['General']['autoEncode'] = str(self.args.autoEncode)\n config['General']['corpus'] = str(self.args.corpus)\n config['General']['datasetTag'] = str(self.args.datasetTag)\n\n config['Network'] = {}\n config['Network']['hiddenSize'] = str(self.args.hiddenSize)\n config['Network']['numLayers'] = str(self.args.numLayers)\n config['Network']['embeddingSize'] = str(self.args.embeddingSize)\n config['Network']['initEmbeddings'] = str(self.args.initEmbeddings)\n config['Network']['softmaxSamples'] = str(self.args.softmaxSamples)\n\n # Keep track of the learning params (but without restoring them)\n config['Training (won\\'t be restored)'] = {}\n config['Training (won\\'t be restored)']['learningRate'] = str(self.args.learningRate)\n config['Training (won\\'t be restored)']['batchSize'] = str(self.args.batchSize)\n\n with open(os.path.join(self.modelDir, self.CONFIG_FILENAME), 'w') as configFile:\n config.write(configFile)\n
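# For reference, a minimal sketch of the params.ini file written above (the
# keys come from the code; the values shown are illustrative defaults only):
#
#   [General]
#   version = 0.4
#   globStep = 2000
#   maxLength = 10
#   watsonMode = False
#
#   [Network]
#   hiddenSize = 256
#   numLayers = 2
#   embeddingSize = 32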
\n def _getSummaryName(self):\n \"\"\" Parse the argument to decide where to save the summary, at the same place as the model\n The folder could already contain logs if we restore the training, those will be merged\n Return:\n str: The path and name of the summary\n \"\"\"\n return self.modelDir\n\n def _getModelName(self):\n \"\"\" Parse the argument to decide where to save/load the model\n This function is called at each checkpoint and the first time the model is loaded. If keepAll option is set, the\n globStep value will be included in the name.\n Return:\n str: The path and name where the model needs to be saved\n \"\"\"\n modelName = os.path.join(self.modelDir, self.MODEL_NAME_BASE)\n if self.args.keepAll: # We do not erase the previously saved model by including the current step on the name\n modelName += '-' + str(self.globStep)\n return modelName + self.MODEL_EXT\n\n def getDevice(self):\n \"\"\" Parse the argument to decide on which device to run the model\n Return:\n str: The name of the device on which to run the program\n \"\"\"\n if self.args.device == 'cpu':\n return '/cpu:0'\n elif self.args.device == 'gpu':\n return '/gpu:0'\n elif self.args.device is None: # No specified device (default)\n return None\n else:\n print('Warning: Error in the device name: {}, use the default device'.format(self.args.device))\n return None\n"},"license":{"kind":"string","value":"apache-2.0"}}},
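The daemon test mode above is designed to be driven from another program. A minimal sketch of such a caller (the import path is assumed from the repository layout chatbot/chatbot.py; the argument list mirrors parseArgs):

from chatbot.chatbot import Chatbot

bot = Chatbot()
bot.main(['--test', 'daemon'])                    # builds the model and keeps the session open
print(bot.daemonPredict('Hello, how are you?'))   # returns a cleaned answer string
bot.daemonClose()                                 # releases the TensorFlow session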
{"rowIdx":2782,"cells":{"repo_name":{"kind":"string","value":"codemeow5/PyPack"},"path":{"kind":"string","value":"pypack/protocol.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2657"},"content":{"kind":"string","value":"\"\"\" Class and function related to protocol operation\n\"\"\"\n\nimport datetime\nimport struct\n\nMSG_TYPE_SEND = 0x1\nMSG_TYPE_ACK = 0x2\nMSG_TYPE_RECEIVED = 0x3\nMSG_TYPE_RELEASE = 0x4\nMSG_TYPE_COMPLETED = 0x5\n\nQOS0 = 0\nQOS1 = 1\nQOS2 = 2\n\n# MAX_DATETIME = int((datetime.datetime(2500, 1, 1) - datetime.datetime(1970, 1, 1)).total_seconds())\n\nclass Packet(object):\n \"\"\" This is a class that describes an incoming or outgoing message\n\n Members:\n\n msg_type : Enum. message type\n qos : Enum. quality of service level\n dup : Bool. whether the message is resent\n msg_id : Number. message id\n remaining_length : Number. payload length\n total_length : Number. buffer length\n payload : String. message body\n buff : String. full message\n confirm : whether the message is answered\n retry_times : resent times\n timestamp : next send time\n \"\"\"\n def __init__(self, msg_type=MSG_TYPE_SEND, qos=QOS0, dup=False, msg_id=0, payload=None):\n self.msg_type = msg_type\n self.qos = qos\n self.dup = dup\n self.msg_id = msg_id\n if payload is not None and not isinstance(payload, str):\n raise TypeError(\"parameter payload must be str, not %s\" % type(payload).__name__)\n self.payload = payload\n if payload is None:\n self.remaining_length = 0\n else:\n self.remaining_length = len(payload)\n self.total_length = 5 + self.remaining_length\n self.confirm = False\n self.retry_times = 0\n self.timestamp = 0\n self.buff = None\n\n @staticmethod\n def encode(packet):\n \"\"\" Encode packet object and fill buff field\n \"\"\"\n buff = bytearray()\n fixed_header = (packet.msg_type << 4) | (packet.qos << 2) | (packet.dup << 1)\n buff.extend(struct.pack(\"!B\", fixed_header))\n buff.extend(struct.pack(\"!H\", packet.msg_id))\n buff.extend(struct.pack(\"!H\", packet.remaining_length))\n if packet.payload is not None:\n buff.extend(packet.payload)\n packet.buff = str(buff)\n\n @staticmethod\n def decode(buff):\n \"\"\" Convert buff string to packet object\n \"\"\"\n (fixed_header, msg_id, remaining_length) = struct.unpack(\"!BHH\", buff[:5])\n msg_type = fixed_header >> 4\n qos = (fixed_header & 0xf) >> 2\n dup = (fixed_header & 0x3) >> 1\n if len(buff) >= 5 + remaining_length:\n (_, payload) = struct.unpack(\"!5s%ss\" % remaining_length, buff[:5 + remaining_length])\n packet = Packet(msg_type, qos, dup, msg_id, payload)\n packet.buff = buff\n return packet\n else:\n return None\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":2783,"cells":{"repo_name":{"kind":"string","value":"ioanaantoche/muhaha"},"path":{"kind":"string","value":"ioana/examples/feet.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1624"},"content":{"kind":"string","value":"import sys\nfrom naoqi import ALProxy\nimport time\n\ndef main(robotIP):\n PORT = 9559\n\n try:\n motionProxy = ALProxy(\"ALMotion\", robotIP, PORT)\n except Exception,e:\n print \"Could not create proxy to ALMotion\"\n print \"Error was: \",e\n sys.exit(1)\n\n try:\n postureProxy = ALProxy(\"ALRobotPosture\", robotIP, PORT)\n except Exception, e:\n print \"Could not create proxy to ALRobotPosture\"\n print \"Error was: \", e\n\n # Send NAO to Pose Init\n postureProxy.goToPosture(\"StandInit\", 0.5)\n\n motionProxy.wbEnable(True)\n\n # Example showing how to fix the feet.\n #print \"Feet fixed.\"\n #stateName = \"Fixed\"\n #supportLeg = \"Legs\"\n #motionProxy.wbFootState(stateName, supportLeg)\n\n # Example showing how to fix the left leg and constrain the right leg in a plane.\n #print \"Left leg fixed, right leg in a plane.\"\n #motionProxy.wbFootState(\"Fixed\", \"LLeg\")\n #motionProxy.wbFootState(\"Plane\", \"RLeg\")\n\n # Example showing how to fix the left leg and keep the right leg free.\n print \"Left leg fixed, right leg free\"\n motionProxy.wbFootState(\"Fixed\", \"LLeg\")\n motionProxy.wbFootState(\"Free\", \"RLeg\")\n\n time.sleep(10.0)\n print \"motionProxy.wbEnable(False)\"\n motionProxy.wbEnable(False)\n\n time.sleep(5.0)\n print \"postureProxy.goToPosture(Sit, 0.5)\"\n postureProxy.goToPosture(\"SitRelax\", 0.5)\n\n\n\nif __name__ == \"__main__\":\n robotIp = \"127.0.0.1\"\n\n if len(sys.argv) <= 1:\n print \"Usage python almotion_wbfootstate.py robotIP (optional default: 127.0.0.1)\"\n else:\n robotIp = sys.argv[1]\n\n 
main(robotIp)"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":2784,"cells":{"repo_name":{"kind":"string","value":"seomoz/gevent-soup"},"path":{"kind":"string","value":"bs4/element.py"},"copies":{"kind":"string","value":"438"},"size":{"kind":"string","value":"61538"},"content":{"kind":"string","value":"import collections\nimport re\nimport sys\nimport warnings\nfrom bs4.dammit import EntitySubstitution\n\nDEFAULT_OUTPUT_ENCODING = \"utf-8\"\nPY3K = (sys.version_info[0] > 2)\n\nwhitespace_re = re.compile(\"\\s+\")\n\ndef _alias(attr):\n \"\"\"Alias one attribute name to another for backward compatibility\"\"\"\n @property\n def alias(self):\n return getattr(self, attr)\n\n @alias.setter\n def alias(self):\n return setattr(self, attr)\n return alias\n\n\nclass NamespacedAttribute(unicode):\n\n def __new__(cls, prefix, name, namespace=None):\n if name is None:\n obj = unicode.__new__(cls, prefix)\n elif prefix is None:\n # Not really namespaced.\n obj = unicode.__new__(cls, name)\n else:\n obj = unicode.__new__(cls, prefix + \":\" + name)\n obj.prefix = prefix\n obj.name = name\n obj.namespace = namespace\n return obj\n\nclass AttributeValueWithCharsetSubstitution(unicode):\n \"\"\"A stand-in object for a character encoding specified in HTML.\"\"\"\n\nclass CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):\n \"\"\"A generic stand-in for the value of a meta tag's 'charset' attribute.\n\n When Beautiful Soup parses the markup '', the\n value of the 'charset' attribute will be one of these objects.\n \"\"\"\n\n def __new__(cls, original_value):\n obj = unicode.__new__(cls, original_value)\n obj.original_value = original_value\n return obj\n\n def encode(self, encoding):\n return encoding\n\n\nclass ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):\n \"\"\"A generic stand-in for the value of a meta tag's 'content' attribute.\n\n When Beautiful Soup parses the markup:\n \n\n The value of the 'content' attribute will be one of these objects.\n \"\"\"\n\n CHARSET_RE = re.compile(\"((^|;)\\s*charset=)([^;]*)\", re.M)\n\n def __new__(cls, original_value):\n match = cls.CHARSET_RE.search(original_value)\n if match is None:\n # No substitution necessary.\n return unicode.__new__(unicode, original_value)\n\n obj = unicode.__new__(cls, original_value)\n obj.original_value = original_value\n return obj\n\n def encode(self, encoding):\n def rewrite(match):\n return match.group(1) + encoding\n return self.CHARSET_RE.sub(rewrite, self.original_value)\n\nclass HTMLAwareEntitySubstitution(EntitySubstitution):\n\n \"\"\"Entity substitution rules that are aware of some HTML quirks.\n\n Specifically, the contents of
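The CHARSET_RE rewriting shown above for ContentMetaAttributeValue can be exercised on its own; a minimal sketch using the same pattern (the replacement encoding is illustrative):

import re

CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
value = 'text/html; charset=x-sjis'
print(CHARSET_RE.sub(lambda m: m.group(1) + 'utf8', value))
# -> text/html; charset=utf8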