{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \")\n\n ################################################\n ### Action methods (called from Javascript)\n\n def export(self, doc, root, q):\n # helper function to build correct xml of question\n ques = root.appendChild(doc.createElement('question'))\n ques.setAttribute('id', q.id)\n text = ques.appendChild(doc.createElement('text'))\n text.appendChild(doc.createTextNode(q.text))\n descrip = ques.appendChild(doc.createElement('description'))\n descrip.appendChild(doc.createTextNode(q.descrip))\n ansFormat = ques.appendChild(doc.createElement('format'))\n ansFormat.appendChild(doc.createTextNode(q.format))\n \n users = ques.appendChild(doc.createElement('users'))\n allUsers = ''\n for u in q.users:\n 
allUsers += `u`\n users.appendChild(doc.createTextNode(allUsers))\n \n comment = ques.appendChild(doc.createElement('comment'))\n comment.appendChild(doc.createTextNode(q.comment))\n comOpt = ques.appendChild(doc.createElement('comOpt'))\n comOpt.appendChild(doc.createTextNode(q.comOpt))\n \n options = q.search1(name=\"options\")\n allOptions = options.get_child_items(self)\n opts = ques.appendChild(doc.createElement('options'))\n opts.setAttribute('id', options.id)\n num = opts.appendChild(doc.createElement('num_selections'))\n num.appendChild(doc.createTextNode(str(options.num_selections)))\n for o in allOptions:\n opt = opts.appendChild(doc.createElement('option'))\n opt.setAttribute('id', o.id)\n text = opt.appendChild(doc.createElement('text'))\n text.appendChild(doc.createTextNode(o.text))\n \n \"\"\"\n gories = q.search1(name=\"categories\")\n allCs = gories.get_child_items(self)\n cats = ques.appendChild(doc.createElement('categories'))\n cats.setAttribute('id', gories.id)\n for cat in allCs:\n ca = cats.appendChild(doc.createElement('category'))\n ca.setAttribute('id', cat.id)\n ca.appendChild(doc.createTextNode(cat.name))\n \"\"\"\n\n poet = q.search1(name=\"poet\")\n allPoet = poet.get_child_items(self)\n poetCts = ques.appendChild(doc.createElement('poet'))\n poetCts.setAttribute('id', poet.id)\n for p in allPoet:\n t = poetCts.appendChild(doc.createElement('factor'))\n t.setAttribute('id', p.id)\n t.appendChild(doc.createTextNode(p.name))\n\n sets = q.search1(name=\"sets\")\n allSets = sets.get_child_items(self)\n tag = ques.appendChild(doc.createElement('sets'))\n tag.setAttribute('id', sets.id)\n for s in allSets:\n ca = tag.appendChild(doc.createElement('set'))\n ca.setAttribute('id', s.id)\n ca.appendChild(doc.createTextNode(s.name))\n \n answers = q.search1(name=\"answers\")\n allAnswers = answers.get_child_items(self)\n answs = ques.appendChild(doc.createElement('answers'))\n answs.setAttribute('id', answers.id)\n for a in allAnswers:\n answ = 
answs.appendChild(doc.createElement('answer'))\n answ.setAttribute('id', a.id)\n ans = answ.appendChild(doc.createElement('answer'))\n ans.appendChild(doc.createTextNode(a.answer))\n who = answ.appendChild(doc.createElement('who'))\n who.appendChild(doc.createTextNode(a.who))\n when = answ.appendChild(doc.createElement('when'))\n when.appendChild(doc.createTextNode(a.when))\n comment = answ.appendChild(doc.createElement('comment'))\n comment.appendChild(doc.createTextNode(a.comment))\n return doc\n \n def get_findings_action(self, request, filterChoice):\n meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))\n parent = meeting.get_parent()\n activities = parent.search1(view='questioneditor')\n questions = activities.search1(name=\"questions\")\n doc = xml.dom.minidom.Document()\n root = doc.appendChild(doc.createElement(\"QuestionSystem\"))\n meta = root.appendChild(doc.createElement('meta'))\n date = meta.appendChild(doc.createElement('exportdate'))\n date.appendChild(doc.createTextNode(time.strftime('%a, %d %b %Y %H:%M:%S')))\n quesRoot = root.appendChild(doc.createElement('questions'))\n xmlDoc = doc\n \n #log.info(\"*** START OF REPORT ***\")\n #log.info(\"filterChoice = \"+str(filterChoice))\n\n # Iterate through all questions, filter out the questions that match the categories\n count = 0 #only for debugging (but could be useful later)\n for q in questions:\n #log.info(\" --- QUESTION --- \")\n users = q.users\n poet = []\n sets = []\n for qchild in q:\n if qchild.name == \"poet\":\n for p in qchild:\n poet.append(p.name)\n elif qchild.name == \"sets\":\n for s in qchild:\n sets.append(s.name)\n #log.info(\"Users: \"+str(users)+\" vs. \"+str(filterChoice[0]))\n #log.info(\"Poet: \"+str(poet)+\" vs. \"+str(filterChoice[1]))\n #log.info(\"Sets: \"+str(sets)+\" vs. 
\"+str(filterChoice[2]))\n \n #these three checks could be rewritten as three calls to a function that takes two lists and returns True if there is any shared element\n # check users\n if 'All' in filterChoice[0]:\n includeUsers = True\n else:\n includeUsers = False\n for filterUser in filterChoice[0]: \n if filterUser in users:\n includeUsers = True\n break\n \n # check poet\n if 'All' in filterChoice[1]:\n includePoet = True\n else:\n includePoet = False\n for filterPoet in filterChoice[1]: \n if filterPoet in poet:\n includePoet = True\n break\n \n # check categories\n if 'All' in filterChoice[2]:\n includeSet = True\n else:\n includeSet = False\n for filterSet in filterChoice[2]: \n if filterSet in sets:\n includeSet = True\n break\n\n #If you want to force a question to match every element of a filter, use this logic instead:\n \"\"\"\n includeUsers = True #bool starts as true instead of false\n for filterUser in filterChoice[0]: \n if filterUser not in users: #check for \"not in\" as opposed to \"in\"\n includeUsers = False\n break\n \"\"\"\n\n #log.info(str(includeUsers)+str(includePoet)+str(includeSet))\n if includeUsers and includePoet and includeSet: \n \txmldoc = ReportFindings.export(self, doc, quesRoot, q)\n \tcount += 1\n \n #q_count+=1\n #log.info(\" ---------------- \")\n #log.info(\"# of matches: \"+str(count))\n #log.info(\"**** END OF REPORT ****\")\n f = open('qaDoc.xml','w')\n print >> f, xmlDoc.toxml()\t\n requestedQuestion = []\n events = []\n events.append(Event('viewFindings', xmlDoc.toxml()))\n return events\n\n def exportCSV_action(self, request, filters):\n meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))\n parent = meeting.get_parent()\n meetingRoot = parent.get_parent()\n questioneditor = parent.search1(view='questioneditor')\n questions = questioneditor.search1(name=\"questions\")\n\n groups = meetingRoot.search1(name='groups')\n userDictionary = {} \n for group in groups:\n userDictionary[group.name] = []\n for 
user in group:\n userDictionary[group.name].append(user.user_id)\n \n group_filter = filters[0]\n poet_filter = filters[1]\n sets_filter = filters[2]\n\n # Step 1: Create a dictionary with a key for every existing combination of [POET]x[Set]x[Group].\n # Each key's entry will be a list of question ids that belong to that combination.\n # This dictionary acts as a \"master list\" for ordering purposes. \n qLists = {}\n for q in questions:\n #Please feel free to change these variable names if you come up with something better\n q_poet = [] #the question's poet factors\n q_poetNode = q.search1(name='poet')\n for q_p in q_poetNode:\n q_poet.append(q_p.name)\n if not q_poet: # if q_poet == []\n q_poet = [\"None\"] #this is only necessary for POET factors, because a question with no sets/groups can't be asked\n if not \"All\" in poet_filter: #change this to \"elif\", and questions without a POET factor will survive the filter anyway\n q_poet = filter(lambda x:x in q_poet, poet_filter)\n \n q_sets = [] #the question's sets\n q_setsNode = q.search1(name='sets')\n for q_set in q_setsNode:\n q_sets.append(q_set.name) \n if not \"All\" in sets_filter: #\"all\" is not in the filter set\n q_sets = filter(lambda x:x in q_sets, sets_filter)\n\n q_groups = q.users #the queston's groups\n if not \"All\" in group_filter: #\"all\" is not in the filter set\n q_groups = filter(lambda x:x in q_groups, group_filter) \n\n for qp in q_poet: #for...\n for qs in q_sets: #every...\n for qg in q_groups: #combination:\n try:\n qLists[qp+qs+qg].append(q.id) # add it to the relevant list\n except KeyError: #entry doesn't exist yet\n qLists[qp+qs+qg] = [q.id]\n \n # Step 2: Create a dictionary with a key for every combination of [User] x [POET] x [Set] x [Group].\n # Populate it with each entry a list of ints, with ints corresponding to answers to questions.\n # This is almost exactly what the final CSV will look like.\n answerData = {}\n t = {'stronglydisagree': 1, 'disagree': 2, 
'somewhatdisagree': 3, 'neither': 4, 'somewhatagree': 5, 'agree': 6, 'stronglyagree': 7} #translates answers into numbers\n\n for q in questions: \n q_poet = [] #the question's poet factors\n q_poetNode = q.search1(name='poet')\n for q_p in q_poetNode:\n q_poet.append(q_p.name)\n if not q_poet: # if q_poet == []\n q_poet = [\"None\"] #this is only necessary for POET factors, because a question with no sets/groups can't be asked\n if not \"All\" in poet_filter: #change this to \"elif\", and questions without a POET factor will survive the filter anyway\n q_poet = filter(lambda x:x in q_poet, poet_filter)\n \n q_sets = [] #the question's sets\n q_setsNode = q.search1(name='sets')\n for q_set in q_setsNode:\n q_sets.append(q_set.name)\n if not \"All\" in sets_filter: #\"all\" is not in the filter set\n q_sets = filter(lambda x:x in q_sets, sets_filter)\n\n q_groups = q.users #the question's groups\n if not \"All\" in group_filter: #\"all\" is not in the filter set\n q_groups = filter(lambda x:x in q_groups, group_filter)\n\n answers = q.search1(name='answers') #all the answers that question has received\n for answer in answers: #for every individual answer...\n user = answer.who #who answered it...\n user_id = answer.creatorid\n for qp in q_poet: #and what...\n for qs in q_sets: #categories it...\n for qg in q_groups: #belongs to:\n if user_id in userDictionary[qg]: #ignore the groups the user doesn't belong to\n index = qLists[qp+qs+qg].index(q.id) #fetch the index from the master list\n entry = user+\"|\"+qp+\"|\"+qs+\"|\"+qg #compose a name with \"|\" marks for division later\n try:\n answerData[entry][index] = t[answer.answer] #update the appropriate column of the row\n except KeyError: #that row doesn't exist yet -- so make it\n answerData[entry] = [0] * len(qLists[qp+qs+qg]) #a zero for every question belonging to the poet/set/group\n answerData[entry][index] = t[answer.answer]\n\n # Step 3: Create the CSV file.\n # Each key of the dictionary created in Step 3 
will be transformed into a row of the CSV.\n csv = \"Username, POET Factor, Set, Group\\n\" #the header\n for key in answerData.keys(): #each of these will be a row in the final file\n keySplit = key.split('|') #\"Alan|Political|Mandatory|PM\" -> [\"Alan\", \"Political\", \"Mandatory\", \"PM\"]\n string = \"{user}, {poet}, {set}, {group}\".format(\n user=keySplit[0], poet=keySplit[1], set=keySplit[2], group=keySplit[3]) #the key becomes the first four entries of the row\n for answer in answerData[key]:\n if answer > 0:\n string += \", \"+str(answer) #if the user answered, add that answer to the end of the row\n else:\n string += \", \" #if the user didn't answer, leave that slot blank\n string += \"\\n\" #move to next row\n csv += string #add to CSV\n\n log.info(\"csv:\\n\"+csv) #debug\n \n f = open('POET.csv','w')\n print >> f, csv\n \n events = []\n return events\n\n #######################################\n ### Window initialization methods\n\n def get_initial_events(self, request, rootid):\n '''Retrieves a list of initial javascript calls that should be sent to the client\n when the view first loads. Typically, this is a series of add_processor\n events.'''\n meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))\n parent = meeting.get_parent()\n activities = parent.search1(view='questioneditor')\n events = []\n allQuestions = []\n for child in activities.search1(name=\"questions\"):\n item = datagate.get_item(child.id)\n options = item.search1(name=\"options\")\n allChoices = options.get_child_items(self)\n allOptions = []\n for choice in allChoices:\n allOptions.append(choice.text)\n allQuestions.append([child.id, child.text, child.format, child.comment, allOptions, options.num_selections, child.comOpt])\n return events\n\n def initialize_activity(self, request, new_activity):\n '''Called from the Administrator. 
Sets up the activity'''\n BaseView.initialize_activity(self, request, new_activity)\n\n \n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1083,"cells":{"__id__":{"kind":"number","value":12919261663619,"string":"12,919,261,663,619"},"blob_id":{"kind":"string","value":"ee4df12b2afc98fe20f2f095eeac3f3af21d08b3"},"directory_id":{"kind":"string","value":"15fd40caf9143d9f313846e25f86d37126bdc759"},"path":{"kind":"string","value":"/sut_tools/installTests.py"},"content_id":{"kind":"string","value":"fb34d97cd17dfdff855031f850b11627ba528c60"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"lsblakk/tools"},"repo_url":{"kind":"string","value":"https://github.com/lsblakk/tools"},"snapshot_id":{"kind":"string","value":"e07ae50d7bb14a95241c64e83133d14493c7b3d2"},"revision_id":{"kind":"string","value":"a427d8b2790350b524b66ac14534fd836ae10476"},"branch_name":{"kind":"string","value":"HEAD"},"visit_date":{"kind":"timestamp","value":"2016-09-06T10:13:15.460163","string":"2016-09-06T10:13:15.460163"},"revision_date":{"kind":"timestamp","value":"2012-03-28T23:13:23","string":"2012-03-28T23:13:23"},"committer_date":{"kind":"timestamp","value":"2012-03-28T23:13:23","string":"2012-03-28T23:13:23"},"github_id":{"kind":"number","value":2156912,"string":"2,156,912"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gh
a_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os, sys\nimport devicemanagerSUT as devicemanager\n\nif (len(sys.argv) <> 3):\n print \"usage: install.py \"\n sys.exit(1)\n\nprint \"connecting to: \" + sys.argv[1]\ndm = devicemanager.DeviceManagerSUT(sys.argv[1])\n\ndevRoot = dm.getDeviceRoot()\nsource = sys.argv[2]\nfilename = os.path.basename(source)\ntarget = os.path.join(devRoot, filename)\n\ndm.pushFile(source, target)\ndm.unpackFile(target)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1084,"cells":{"__id__":{"kind":"number","value":18889266172522,"string":"18,889,266,172,522"},"blob_id":{"kind":"string","value":"269a1e0caf5e0ac25ae4bf643ca1022ce80beebc"},"directory_id":{"kind":"string","value":"ac781bfeb6e4cb47e9cbd4d25f848734f3d30097"},"path":{"kind":"string","value":"/callison/day2.python.com/controllers/login.py"},"content_id":{"kind":"string","value":"6f222ff7f774eb28453b7c525751cfd9912853af"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tbsmithFS/SSL1308"},"repo_url":{"kind":"string","value":"https://github.com/tbsmithFS/SSL1308"},"snapshot_id":{"kind":"string","value":"3e435242797f6c691e7116e0468c3f208875af98"},"revision_id":{"kind":"string","value":"d2a5b79c78bf2048670352dbfc6611bf091d237e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T16:40:24.282430","string":"2021-01-01T16:40:24.282430"},"revision_date":{"kind":"timestamp","value":"2013-08-29T02:17:26","string":"2013-08-29T02:17:26"},"committer_date":{"kind":"timestamp","value":"2013-08-29T02:17:26","string":"2013-08-29T02:17:26"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nfrom models.view import View\n\nclass Login():\n\tdef get(self, pairs, data={}):\n \n if 'action' not in pairs:\n action = 'home'\n else:\n action = pairs.getvalue('action')\n \n view_model = View()\n view_model.print_header()\n \n data = {'site_title' : \"My Python website title - login\",\n \t'logo_title' : \"My Python logo\",\n \t'main_body_text' : \"Welcome to the login page\",\n \t'copyright_info' : \"Callisonification &copy; 2013\",}\n \n view_model.get_view(\"header\", data);\n view_model.get_view(\"nav\", data);\n view_model.get_view(\"body\", data);\n view_model.get_view(\"footer\", 
data);"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1085,"cells":{"__id__":{"kind":"number","value":3032246924034,"string":"3,032,246,924,034"},"blob_id":{"kind":"string","value":"b7bede457076f3466eafdc414f01bd3c4607dc0d"},"directory_id":{"kind":"string","value":"a7d24f1685d5e5fffc57bd08fd3194ca94277b12"},"path":{"kind":"string","value":"/.config/sublime-text-2/Packages/Package Control/package_control/providers/package_provider.py"},"content_id":{"kind":"string","value":"7928e6504b7d82f8a03c19d2ad5e72b8c063eafb"},"detected_licenses":{"kind":"list like","value":["LGPL-2.0-or-later","LGPL-2.1-or-later","MIT"],"string":"[\n \"LGPL-2.0-or-later\",\n \"LGPL-2.1-or-later\",\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"tekezo/dot-files"},"repo_url":{"kind":"string","value":"https://github.com/tekezo/dot-files"},"snapshot_id":{"kind":"string","value":"6cbe6a8e4c04ef5a756da86d36da6d694da1ced6"},"revision_id":{"kind":"string","value":"de05d734b9669004785ef63ce68ecb76fc4b102b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T21:49:37.303834","string":"2021-01-15T21:49:37.303834"},"revision_date":{"kind":"timestamp","value":"2013-04-07T11:19:03","string":"2013-04-07T11:19:03"},"committer_date":{"kind":"timestamp","value":"2013-04-07T11:19:03","string":"2013-04-07T11:19:03"},"github_id":{"kind":"number","value":12126779,"string":"12,126,779"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind
":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import json\nimport re\n\nfrom ..console_write import console_write\nfrom .platform_comparator import PlatformComparator\n\n\nclass PackageProvider(PlatformComparator):\n \"\"\"\n Generic repository downloader that fetches package info\n\n With the current channel/repository architecture where the channel file\n caches info from all includes repositories, these package providers just\n serve the purpose of downloading packages not in the default channel.\n\n The structure of the JSON a repository should contain is located in\n example-packages.json.\n\n :param repo:\n The URL of the package repository\n\n :param package_manager:\n An instance of :class:`PackageManager` used to download the file\n \"\"\"\n\n def __init__(self, repo, package_manager):\n self.repo_info = None\n self.repo = repo\n self.package_manager = package_manager\n self.unavailable_packages = []\n\n def match_url(self):\n \"\"\"Indicates if this provider can handle the provided repo\"\"\"\n\n return True\n\n def fetch_repo(self):\n \"\"\"Retrieves and loads the JSON for other methods to use\"\"\"\n\n if self.repo_info != None:\n return\n\n repository_json = self.package_manager.download_url(self.repo,\n 'Error downloading repository.')\n if repository_json == False:\n self.repo_info = False\n return\n\n try:\n self.repo_info = json.loads(repository_json)\n except (ValueError):\n console_write(u'Error parsing JSON from repository %s.' 
% self.repo, True)\n self.repo_info = False\n\n def get_packages(self):\n \"\"\"\n Provides access to the repository info that is cached in a channel\n\n :return:\n A dict in the format:\n {\n 'Package Name': {\n # Package details - see example-packages.json for format\n },\n ...\n }\n or False if there is an error\n \"\"\"\n\n self.fetch_repo()\n if self.repo_info == False:\n return False\n\n output = {}\n\n for package in self.repo_info['packages']:\n\n platforms = package['platforms'].keys()\n best_platform = self.get_best_platform(platforms)\n\n if not best_platform:\n self.unavailable_packages.append(package['name'])\n continue\n\n # Rewrites the legacy \"zipball\" URLs to the new \"zip\" format\n downloads = package['platforms'][best_platform]\n rewritten_downloads = []\n for download in downloads:\n download['url'] = re.sub(\n '^(https://nodeload.github.com/[^/]+/[^/]+/)zipball(/.*)$',\n '\\\\1zip\\\\2', download['url'])\n rewritten_downloads.append(download)\n\n info = {\n 'name': package['name'],\n 'description': package.get('description'),\n 'url': package.get('homepage', self.repo),\n 'author': package.get('author'),\n 'last_modified': package.get('last_modified'),\n 'downloads': rewritten_downloads\n }\n\n output[package['name']] = info\n\n return output\n\n def get_renamed_packages(self):\n \"\"\":return: A dict of the packages that have been renamed\"\"\"\n\n return self.repo_info.get('renamed_packages', {})\n\n def get_unavailable_packages(self):\n \"\"\"\n Provides a list of packages that are unavailable for the current\n platform/architecture that Sublime Text is running on.\n\n This list will be empty unless get_packages() is called first.\n\n :return: A list of package names\n \"\"\"\n\n return 
self.unavailable_packages\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1086,"cells":{"__id__":{"kind":"number","value":2817498570177,"string":"2,817,498,570,177"},"blob_id":{"kind":"string","value":"0c281a1f5485f670b618c144b999b2a601928377"},"directory_id":{"kind":"string","value":"8803d87c5a62e9a6d0b8895375c3443559894591"},"path":{"kind":"string","value":"/mlsvm.py"},"content_id":{"kind":"string","value":"fac0dc17dcff26cb9c86c8750567bcb7d6be4113"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Tomlong/MLlib-UI"},"repo_url":{"kind":"string","value":"https://github.com/Tomlong/MLlib-UI"},"snapshot_id":{"kind":"string","value":"f615f8926165e0ce57f07191a692403d0fcc15e5"},"revision_id":{"kind":"string","value":"338e6c50ce736dee963cb300011d6618d7d5801d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-02T09:33:24.119051","string":"2021-01-02T09:33:24.119051"},"revision_date":{"kind":"timestamp","value":"2014-08-29T02:40:53","string":"2014-08-29T02:40:53"},"committer_date":{"kind":"timestamp","value":"2014-08-29T02:40:53","string":"2014-08-29T02:40:53"},"github_id":{"kind":"number","value":23449998,"string":"23,449,998"},"star_events_count":{"kind":"number","value":4,"string":"4"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"
null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom normalize import norm\nfrom sklearn.decomposition import PCA\nfrom pyspark import SparkContext\nfrom pyspark.mllib.regression import LabeledPoint as lbp\nfrom pyspark.mllib.classification import SVMWithSGD as svm\n\ndef SVMModel(dataPath, label, max_label, min_label, character, master, normalize, ispca):\n \n pca_n = 2\n sc = SparkContext(master)\n data = sc.textFile(dataPath)\n \n mid_label = (float(max_label) + float(min_label)) / 2.0\n\n print data.map(lambda line: line.split(character)).collect()\n \n ndata = data.map(lambda line: line.split(character)).map(lambda part: (map(lambda x: float(x) ,part[0: len(part)])))\n\n if label == 0:\n ndata = ndata.map(lambda line: line[::-1])\n\n if normalize == 1:\n test_data = norm(ndata.collect()) \n norm_data = sc.parallelize(test_data)\n train_data = norm_data.map(lambda part: lbp([1.0 if float(part[0]) > mid_label else 0.0][0], part[1])) \n test_data = norm_data.map(lambda part: ([1.0 if float(part[0]) > mid_label else 0.0][0], part[1])).collect()\n\n else:\n train_data = ndata.map(lambda part: lbp([1.0 if float(len(part) - 1) > mid_label else 0.0][0], part[0: len(part) - 1]))\n test_data = ndata.map(lambda part: ([1.0 if float(part[len(part) - 1]) > mid_label else 0.0][0], part[0:len(part) - 1])).collect()\n\n if ispca == 1:\n pca = PCA(n_components = pca_n)\n pca_train = [test_data[i][1] for i in range(len(test_data))]\n pca_data = pca.fit(pca_train).transform(pca_train)\n\n test = []\n for i in range(len(pca_data)):\n test.append([test_data[i][0], pca_data[i]])\n\n train_data = sc.parallelize(test).map(lambda part: lbp(part[0], part[1]))\n test_data = test\n \n\n\n model_svm = svm.train(train_data)\n acc_svm = 0\n err_lrg = 0.0\n size = len(train_data.collect())\n \n for i in range(size):\n if 
model_svm.predict(test_data[i][1]) == test_data[i][0]:\n acc_svm += 1\n \n String = \"SVM Result:\\n\"\n String = String + str(model_svm.weights) + \"\\n\"\n String = String + str((float(acc_svm)/ float(size)) * 100) + \"%\"\n \n\n x = []\n y = []\n showpic = 0\n\n if len(test_data[0][1]) == 2:\n ispca = 1\n\n if ispca == 1:\n for i in range(size): \n if test_data[i][0] == 0.0: \n plt.plot(test_data[i][1][0], test_data[i][1][1], 'ro', color = 'r', markersize = 8)\n elif test_data[i][0] == 1.0:\n plt.plot(test_data[i][1][0], test_data[i][1][1], 'ro', color = 'b', markersize = 8)\n\n test = sc.parallelize(test_data)\n max_axis = test.map(lambda part: part[1][0]).max()\n min_axis = test.map(lambda part: part[1][0]).min()\n plt.plot([min_axis, max_axis], [max_axis * model_svm.weights[0] + model_svm.weights[1], min_axis * model_svm.weights[0] + model_svm.weights[1]], 'g-', linewidth= 2)\n plt.savefig('result.jpg')\n plt.close('all')\n showpic = 1\n\n sc.stop()\n return (showpic, String)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1087,"cells":{"__id__":{"kind":"number","value":747324355846,"string":"747,324,355,846"},"blob_id":{"kind":"string","value":"ad85606eb94dd4a76c57796e2e87f7e772f1a892"},"directory_id":{"kind":"string","value":"2248c356664df8ddede6cddcc6c7e62772551117"},"path":{"kind":"string","value":"/python/lang_sample.py"},"content_id":{"kind":"string","value":"4c67a233e6d6ec8145cafe3919e5c166891a532e"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"abishop42/sample"},"repo_url":{"kind":"string","value":"https://github.com/abishop42/sample"},"snapshot_id":{"kind":"string","value":"d414fda94b4a07ab972ecf8194e4a5eddeee5cf3"},"revision_id":{"kind":"string","value":"4d7e9f0ee402f77acfd76458d07524338da33bc1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T14:46:05.497080","string":"2021-01-10T14:46:05.497080"},"revision_date":{"kind":"timestamp","value":"2014-10-27T18:35:53","string":"2014-10-27T18:35:53"},"committer_date":{"kind":"timestamp","value":"2014-10-27T18:35:53","string":"2014-10-27T18:35:53"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nequals_opertators = [\"is\", \"==\",\"equal\",\"=\"]\n\nopertators = [\"is\",\"not\",\"greater\",\"than\",\"less\",\"equal\",\"to\", \"<\", \"<=\", \"==\", \">\", \">=\", \"!=\", \"like\", \"contains\",\"equal\",\"=\"]\ncombines = [\"and\", \"or\"]\n\nclass QuerryString():\n\n\tdef __init__(self,ops=[],com=[]):\n\t\tself.opertators = ops\n\t\tself.combines = com\n\t\tself.query = \"\"\n\n\tdef parse(self, input_string, delim=' '):\n\t\tresult = []\n\t\tinitial = input_string.split(delim)\n\n\t\tsplit_pos = [i for i in range(len(initial)) if initial[i] in combines]\n\n\t\ttemp = []\n\n\t\topp = [\"and\",\"or\"]\n\n\t\tpart = 
query.split(' ')\n\t\tm = [i for i in range(len(part)) if part[i].lower() in opp]\n\t\tm.append(-1)\n\t\tpos = 0\n\t\t\n\t\tfor i in m:\n\t\t\tif part[pos] in opp:\n\t\t\t\ttemp.append(part[pos])\n\t\t\t\tpos = pos + 1\n\n\t\t\tif i == -1:\n\t\t\t\ttemp.append(part[pos:])\n\t\t\telse:\n\t\t\t\ttemp.append(part[pos:i])\n\t\t\tpos = i\n\n\t\tresult = []\n\t\tfor t in temp:\n\t\t\tresult.append(self.combine_operators(t))\t\t\n\n\t\tself.query = result\n\t\treturn result\n\n\tdef combine_operators(self, input_object):\n\t\tresult = input_object\n\t\tif type([]) == type(input_object):\n\t\t\tresult = []\n\t\t\topp = []\n\t\t\tfor i in range(len(input_object)):\n\t\t\t\tif input_object[i] in opertators: \n\t\t\t\t\topp.append(input_object[i])\n\t\t\t\telse:\n\t\t\t\t\tif len(opp) > 0:\n\t\t\t\t\t\tresult.append(\" \".join(opp))\n\t\t\t\t\t\topp = []\n\t\t\t\t\tresult.append(input_object[i])\n\t\treturn result\n\n\tdef check_object(self, obj):\n\t\tresult = []\n\t\t\n\t\tfor o in obj:\n\t\t\tstatus = []\n\t\t\tif type(o) == type({}):\n\t\t\t\tfor q in self.query:\n\t\t\t\t\tif type(q) == type([]):\n\t \t\t\t\t\tif q[1] in [\"is\",\"equal\",\"==\", \"=\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (o[q[0]] == q[2]))\n\t \t\t\t\t\telif q[1] in [\"is not\",\"!=\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (not(o[q[0]] == q[2])))\n\t \t\t\t\t\telif q[1] in [\"greater than\", \">\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (float(o[q[0]]) > float(q[2])))\n\t \t\t\t\t\telif q[1] in [\"greater than equal to\", \">=\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (float(o[q[0]]) >= float(q[2])))\n\t \t\t\t\t\telif q[1] in [\"less than\", \"<\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (float(o[q[0]]) > float(q[2])))\n\t \t\t\t\t\telif q[1] in [\"less than equal to\", \"<=\"]:\n\t \t\t\t\t\t\tstatus.append(\"%s\" % (float(o[q[0]]) <= float(q[2])))\n\t\t\t\t\telif q in [\"and\", \"or\"]:\n\t\t\t\t\t\tstatus.append(q)\n\n\t\t\tprint (\"overall => \" + \" \".join(status))\n\t\t\toverall = 
eval(\" \".join(status))\n\t\t\tprint (overall)\n\t\t\tif overall == True:\n\t\t\t\tresult.append(o)\n\n\t\treturn result\n\n\nif __name__ == \"__main__\":\n\tinput_object = [\n\t\t{\"input_key\":\"1111\",\"name\":\"fred\",\"address\":\"some street\", \"suburb\":\"aplace\",\"postcode\":\"9999\"},\n\t\t{\"input_key\":\"1112\",\"name\":\"fred\",\"address\":\"some street\", \"suburb\":\"aplace\",\"postcode\":\"9991\"},\n\t\t{\"input_key\":\"1112\",\"name\":\"blah\",\"address\":\"some street\", \"suburb\":\"donuts\",\"postcode\":\"9991\"},\n\t\t{\"input_key\":\"2222\",\"name\":\"fred\",\"address\":\"some street\", \"suburb\":\"donuts\",\"postcode\":\"9992\"}\n\t\t]\n\n\tquery = \"postcode == 9992 and name == blah or suburb = donuts\"\n\n\tfor i in input_object:\n\t\tprint (i)\n\n\tq = QuerryString(opertators, combines)\n\tprint(q.parse(query))\n\tprint (\"*** RESULTS ***\")\n\tfor r in q.check_object(input_object):\n\t\tprint (r)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1088,"cells":{"__id__":{"kind":"number","value":1460288903810,"string":"1,460,288,903,810"},"blob_id":{"kind":"string","value":"d3d95ab1e7e1cf6069382faff2db6ebb644744fd"},"directory_id":{"kind":"string","value":"45af6ec2ef1444817e64a1c27c781e23a52f744d"},"path":{"kind":"string","value":"/upload_all_the_things/challenge1.py"},"content_id":{"kind":"string","value":"1d1d0c3b12f788dd6cee968746925d47c3e9f3d6"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Trietptm-on-Coding-Algorithms/2014_Fall_CTF_Week_1"},"repo_url":{"kind":"string","value":"https://github.com/Trietptm-on-Coding-Algorithms/2014_Fall_CTF_Week_1"},"snapshot_id":{"kind":"string","value":"130e791bfc3f6fd331a0a6d88c74ec8c8bdb835c"},"revision_id":{"kind":"string","value":"193272ec6b42fbb48e07e3a9423c6f26bbaa1760"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-14T12:10:07.620717","string":"2020-05-14T12:10:07.620717"},"revision_date":{"kind":"timestamp","value":"2014-09-04T00:53:44","string":"2014-09-04T00:53:44"},"committer_date":{"kind":"timestamp","value":"2014-09-04T00:53:44","string":"2014-09-04T00:53:44"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from flask import Flask, render_template, redirect, request, send_from_directory\nfrom flask_wtf import Form\nfrom wtforms import StringField\nfrom wtforms.validators import DataRequired\nfrom werkzeug import secure_filename\nfrom flask_wtf.file import FileField\nimport glob\n\napp = Flask(__name__)\n\napp.debug = True\napp.secret_key = \"ALNFNAI*)*@$)(NMD)(N@D)(J)(@E\"\n\nclass MyForm(Form):\n\tname = StringField('name', validators=[DataRequired()])\n\nclass PhotoForm(Form):\n photo = FileField('Your photo')\n\n@app.route('/upload/', methods=('GET', 
'POST'))\ndef upload():\n form = PhotoForm()\n if form.validate_on_submit():\n filename = form.photo.data.filename\n form.photo.data.save('uploads/' + filename)\n else:\n filename = None\n list_of_files = glob.glob('uploads/*.png') \n return render_template('upload.html', form=form, filename=filename, files=list_of_files)\n\n@app.route('/uploads/')\ndef send_file(filename):\n return send_from_directory('uploads/', filename)\n\n@app.route('/')\ndef hello_world():\n\treturn 'Hello World'\n\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0')\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1089,"cells":{"__id__":{"kind":"number","value":12515534707046,"string":"12,515,534,707,046"},"blob_id":{"kind":"string","value":"ce5276c99b542d7419106349c3ad19e33e342de4"},"directory_id":{"kind":"string","value":"a559ed0cd9aa8f1c650c5f2a073f7effc7061e73"},"path":{"kind":"string","value":"/pymegacli.py"},"content_id":{"kind":"string","value":"9d7be4f891e058a2496926cbb58c3a9d7c42fa42"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"williamjoy/pymegacli"},"repo_url":{"kind":"string","value":"https://github.com/williamjoy/pymegacli"},"snapshot_id":{"kind":"string","value":"754f8357aedc9659363277dca694858f959952db"},"revision_id":{"kind":"string","value":"b472496ad8b31f02a7cdb6c60ade3154e928420a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2019-01-01T04:34:57.316992","string":"2019-01-01T04:34:57.316992"},"revision_date":{"kind":"timestamp","value":"2010-07-08T11:39:08","string":"2010-07-08T11:39:08"},"committer_date":{"kind":"timestamp","value":"2010-07-08T11:39:08","string":"2010-07-08T11:39:08"},"github_id":{"kind":"number","value":34435299,"string":"34,435,299"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#\n# Copyright (c) 2010, Giovanni P. 
Tirloni \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom subprocess import Popen, PIPE\n\nMEGACLI='/usr/local/sas/utilities/MegaCli'\n\nclass Adapter:\n device_id = 0\n product_name = ''\n serial_number = ''\n fw_package_build = ''\n fw_version = ''\n bios_version = ''\n webbios_version = ''\n preboot_cli_version = ''\n boot_block_version = ''\n sas_address = ''\n bbu_present = False\n alarm_present = False\n nvram_present = False\n serial_debugger_present = False\n memory_present = False\n flash_present = False\n memory_size = ''\n\n def load(self, adapter_id):\n\n try:\n ret = megacli('-AdpAllInfo -a%i -NoLog' % adapter_id)\n except OSError:\n print 'Failed to get adapter information (MegaCli -AdpAllInfo)'\n 
return 0\n\n for line in ret.readlines():\n if line[0:9] == 'Adapter #':\n self.device_id = int(clean_nl(line[9:]))\n if line[0:12] == 'Product Name':\n self.product_name = clean_nl(line[18:])\n elif line[0:9] == 'Serial No':\n self.serial_number = clean_nl(line[18:])\n elif line[0:16] == 'FW Package Build':\n self.fw_package_build = clean_nl(line[18:])\n elif line[0:10] == 'FW Version':\n self.fw_version = clean_nl(line[21:])\n elif line[0:12] == 'BIOS Version':\n self.bios_version = clean_nl(line[21:])\n elif line[0:15] == 'WebBIOS Version':\n self.webbios_version = clean_nl(line[21:])\n elif line[0:19] == 'Preboot CLI Version':\n self.preboot_cli_version = clean_nl(line[21:])\n elif line[0:18] == 'Boot Block Version':\n self.boot_block_version = clean_nl(line[21:])\n elif line[0:11] == 'SAS Address':\n self.sas_address = clean_nl(line[18:])\n elif line[0:3] == 'BBU':\n self.bbu_present = is_present(line[18:])\n elif line[0:5] == 'Alarm':\n self.alarm_present = is_present(line[18:])\n elif line[0:5] == 'NVRAM':\n self.nvram_present = is_present(line[18:])\n elif line[0:15] == 'Serial Debugger':\n self.serial_debugger_present = is_present(line[18:])\n elif line[0:8] == 'Memory ':\n self.memory_present = is_present(line[18:])\n elif line[0:11] == 'Memory Size':\n self.memory_size = clean_nl(line[18:])\n\n def show(self):\n\n ret = \"\"\"Device ID : %d\nProduct Name : %s\nSerial Number : %s\nFW Package Build : %s\nFW Version : %s\nBIOS Version : %s\nWebBIOS Version : %s\nPreboot CLI Version : %s\nBoot Block Version : %s\nSAS Address : %s\nBBU Present : %s\nAlarm Present : %s\nNVRAM Present : %s\nSerial Debugger Present : %s\nMemory Present : %s\nFlash Present : %s\nMemory Size : %s\"\"\" % (self.device_id, self.product_name, \\\n self.serial_number, self.fw_package_build, self.fw_version, \\\n self.bios_version, self.webbios_version, self.preboot_cli_version, \\\n self.boot_block_version, self.sas_address, self.bbu_present, \\\n self.alarm_present, 
self.nvram_present, self.serial_debugger_present, \\\n self.memory_present, self.flash_present, self.memory_size)\n\n print ret\n\n\nclass Enclosure:\n device_id = 0\n number_of_slots = 0\n number_of_power_supplies = 0\n number_of_fans = 0\n number_of_temperature_sensors = 0\n number_of_alarms = 0\n number_of_sim_modules = 0\n number_of_physical_drives = 0\n status = ''\n position = 0\n connector_name = ''\n partner_device_id = 0\n\n def load_from_text(self, input):\n\n for line in input:\n if line[4:13] == 'Device ID':\n self.device_id = int(clean_nl(line[36:]))\n if line[4:19] == 'Number of Slots':\n self.number_of_slots = int(clean_nl(line[36:]))\n elif line[4:28] == 'Number of Power Supplies':\n self.number_of_power_supplies = int(clean_nl(line[36:]))\n elif line[4:18] == 'Number of Fans':\n self.number_of_fans = int(clean_nl(line[36:]))\n elif line[4:33] == 'Number of Temperature Sensors':\n self.number_of_temperature_sensors = int(clean_nl(line[36:]))\n elif line[4:20] == 'Number of Alarms':\n self.number_of_alarms = int(clean_nl(line[36:]))\n elif line[4:25] == 'Number of SIM Modules':\n self.number_of_sim_modules = int(clean_nl(line[36:]))\n elif line[4:29] == 'Number of Physical Drives':\n self.number_of_physical_drives = int(clean_nl(line[36:]))\n elif line[4:10] == 'Status':\n self.status = clean_nl(line[36:])\n elif line[4:12] == 'Position':\n self.position = clean_nl(line[36:])\n elif line[4:18] == 'Connector Name':\n self.connector_name = clean_nl(line[36:])\n elif line[4:21] == 'Partner Device Id':\n self.partner_device_id = int(clean_nl(line[36:]))\n\n def show(self):\n\n ret = \"\"\"Device ID : %i\nNumber of Slots : %i\nNumber of Power Supplies : %i\nNumber of Fans : %i\nNumber of Temperature Sensors : %i\nNumber of Alarms : %i\nNumber of SIM Modules : %i\nNumber of Physical Drives : %i\nStatus : %s\nPosition : %s\nConnector Name : %s\nPartner Device Id : %i\"\"\" % (self.device_id, self.number_of_slots, \\\n self.number_of_power_supplies, 
self.number_of_fans, \\\n self.number_of_temperature_sensors, self.number_of_alarms, \\\n self.number_of_sim_modules, self.number_of_physical_drives, \\\n self.status, self.position, self.connector_name, self.partner_device_id)\n\n print ret\n\n\nclass PhysicalDevice:\n adapter_id = 0\n enclosure_id = 0\n slot_id = 0\n device_id = 0\n sequence_number = 0\n media_errors = 0\n other_errors = 0\n predictive_failures = 0\n last_predictive_seq_number = 0\n pd_type = ''\n raw_size = ''\n non_coerced_size = ''\n coerced_size = ''\n firmware_state = ''\n sas_address = ''\n connected_port_number = ''\n inquiry_data = ''\n fde_capable = ''\n fde_enable = ''\n secured = ''\n locked = ''\n foreign_state = ''\n device_speed = ''\n link_speed = ''\n media_type = ''\n\n def led_on(self):\n\n try:\n ret = megacli('-PdLocate -Start -PhysDrv[%i:%i] -a%i'\n % (self.enclosure_id, self.slot_id, self.adapter_id))\n except OSError:\n print 'Failed to turn location LED on (MegaCli -PdLocate -Start)'\n return False\n\n return True\n\n\n def led_off(self):\n\n try:\n ret = megacli('-PdLocate -Stop -PhysDrv[%i:%i] -a%i'\n % (self.enclosure_id, self.slot_id, self.adapter_id))\n except OSError:\n print 'Failed to turn location LED on (MegaCli -PdLocate -Stop)'\n return False\n\n return True\n\n\n def load_from_text(self, input):\n\n for line in input:\n\n if line[0:19] == 'Enclosure Device ID':\n self.enclosure_id = int(clean_nl(line[21:]))\n if line[0:11] == 'Slot Number':\n self.slot_id = int(clean_nl(line[13:]))\n elif line[0:9] == 'Device Id':\n self.device_id = int(clean_nl(line[11:]))\n elif line[0:15] == 'Sequence Number':\n self.sequence_number = int(clean_nl(line[17:]))\n elif line[0:17] == 'Media Error Count':\n self.media_errors = int(clean_nl(line[19:]))\n elif line[0:17] == 'Other Error Count':\n self.other_errors = int(clean_nl(line[19:]))\n elif line[0:24] == 'Predictive Failure Count':\n self.predictive_failures = int(clean_nl(line[26:]))\n elif line[0:40] == 'Last Predictive 
Failure Event Seq Number':\n self.last_predictive_failure_seq_number = int(clean_nl(line[42:]))\n elif line[0:7] == 'PD Type':\n self.pd_type = clean_nl(line[9:])\n elif line[0:8] == 'Raw Size':\n delim = line.find('[') - 4\n self.raw_size = float(clean_nl(line[10:delim]))\n elif line[0:16] == 'Non Coerced Size':\n delim = line.find('[') - 4\n self.non_coerced_size = float(clean_nl(line[18:delim]))\n elif line[0:12] == 'Coerced Size':\n delim = line.find('[') - 4\n self.coerced_size = float(clean_nl(line[14:delim]))\n elif line[0:14] == 'Firmware state':\n self.firmware_state = clean_nl(line[16:])\n elif line[0:11] == 'SAS Address':\n self.sas_address = clean_nl(line[16:])\n elif line[0:21] == 'Connected Port Number':\n self.connected_port_number = clean_nl(line[23:])\n elif line[0:12] == 'Inquiry Data':\n self.inquiry_data = clean_nl(line[14:])\n elif line[0:11] == 'FDE Capable':\n self.fde_capable = clean_nl(line[13:])\n elif line[0:10] == 'FDE Enable':\n self.fde_enable = clean_nl(line[12:])\n elif line[0:7] == 'Secured':\n self.secured = clean_nl(line[9:])\n elif line[0:6] == 'Locked':\n self.locked = clean_nl(line[8:])\n elif line[0:13] == 'Foreign State':\n self.foreign_state = clean_nl(line[15:])\n elif line[0:12] == 'Device Speed':\n self.device_speed = clean_nl(line[14:])\n elif line[0:10] == 'Link Speed':\n self.link_speed = clean_nl(line[12:])\n elif line[0:10] == 'Media Type':\n self.media_type = clean_nl(line[12:])\n\n def load(self, adapter_id, enclosure_id, slot_id):\n\n try:\n ret = megacli('-PdInfo -PhysDrv[%i:%i] -a%i' % (enclosure_id, slot_id, adapter_id))\n except OSError:\n print 'Failed to get physical device information (MegaCli -PdInfo)'\n return []\n\n self.adapter_id = adapter_id\n ret_lines = ret.readlines()\n self.load_from_text(ret_lines)\n\n def show(self):\n\n ret = \"\"\"Adapter ID: %s\nEnclosure Device ID: %s\nSlot Number: %s\nDevice Id: %s\nSequence Number: %s\nMedia Error Count: %s\nOther Error Count: %s\nPredictive Failure Count: 
%s\nLast Predictive Failure Event Seq Number: %s\nPD Type: %s\nRaw Size: %s\nNon Coerced Size: %s\nCoerced Size: %s\nFirmware state: %s\nSAS Address(0): %s\nConnected Port Number: %s\nInquiry Data: %s\nFDE Capable: %s\nFDE Enable: %s\nSecured: %s\nLocked: %s\nForeign State: %s\nDevice Speed: %s\nLink Speed: %s\nMedia Type: %s\"\"\" % (self.adapter_id, self.enclosure_id, self.slot_id, self.device_id, \\\n self.sequence_number, self.media_errors, self.other_errors, \\\n self.predictive_failures, \\\n self.last_predictive_seq_number, \\\n self.pd_type, self.raw_size, self.non_coerced_size, \\\n self.coerced_size, \\\n self.firmware_state, self.sas_address, self.connected_port_number, \\\n self.inquiry_data, self.fde_capable, self.fde_enable, \\\n self.secured, self.locked, self.foreign_state, self.device_speed, \\\n self.link_speed, self.media_type)\n\n print ret\n\n\nclass VirtualDrive:\n virtualdisk_id = 0\n name = ''\n raid_level = ''\n size = ''\n state = ''\n stripe_size = ''\n number_of_drives = 0\n span_depth = 0\n default_cache_policy = ''\n current_cache_policy = ''\n access_policy = ''\n disk_cache_policy = ''\n encryption = ''\n\n\n def load_from_text(self, input):\n\n for line in input:\n if line[0:12] == 'Virtual Disk':\n offset = line.find('(')\n self.virtualdisk_id = int(clean_nl(line[14:offset-1]))\n if line[0:4] == 'Name':\n self.name = clean_nl(line[6:])\n elif line[0:10] == 'RAID Level':\n self.raid_level = clean_nl(line[12:])\n elif line[0:4] == 'Size':\n delim = line.find(' GB')\n self.size = clean_nl(line[5:delim])\n elif line[0:5] == 'State':\n self.state = clean_nl(line[7:])\n elif line[0:11] == 'Stripe Size':\n delim = line.find(' KB')\n self.stripe_size = clean_nl(line[13:delim])\n elif line[0:16] == 'Number Of Drives':\n self.number_of_drives = int(clean_nl(line[17:]))\n elif line[0:10] == 'Span Depth':\n self.span_depth = int(clean_nl(line[11:]))\n elif line[0:20] == 'Default Cache Policy':\n self.default_cache_policy = 
clean_nl(line[22:])\n elif line[0:20] == 'Current Cache Policy':\n self.current_cache_policy = clean_nl(line[22:])\n elif line[0:13] == 'Access Policy':\n self.access_policy = clean_nl(line[15:])\n elif line[0:17] == 'Disk Cache Policy':\n self.disk_cache_policy = clean_nl(line[19:])\n elif line[0:10] == 'Encryption':\n self.encryption = clean_nl(line[12:])\n\n\n def show(self):\n\n ret = \"\"\"Virtual Disk: %d\nName: %s\nRAID Level: %s\nSize: %s\nState: %s\nStrip Size: %s\nNumber Of Drives: %d\nSpan Depth: %d\nDefault Cache Policy: %s\nCurrent Cache Policy: %s\nAccess Policy: %s\nDisk Cache Policy: %s\nEncryption: %s\"\"\" % (self.virtualdisk_id, self.name, self.raid_level, \\\n self.size, self.state, self.stripe_size, self.number_of_drives, \\\n self.span_depth, self.default_cache_policy, self.current_cache_policy, \\\n self.access_policy, self.disk_cache_policy, self.encryption)\n\n print ret\n\n\ndef adp_list():\n\n try:\n ret = megacli('-AdpCount -NoLog')\n except OSError:\n print 'Failed to get adapter count (MegaCli -AdpCount)'\n return []\n\n adp_count = 0\n\n for line in ret.readlines():\n if line[0:16] == 'Controller Count':\n adp_count = int(clean_nl(line[17:]).replace('.',''))\n\n adp_list = []\n adp = Adapter()\n\n for adp_id in range(0, adp_count):\n adp.load(adp_id)\n adp_list.append(adp)\n adp = Adapter()\n\n return adp_list\n\n\ndef enc_list(adapter_id):\n\n try:\n ret = megacli('-EncInfo -a%i' % adapter_id)\n except OSError:\n print 'Failed to get enclosure information (MegaCli -EncInfo)'\n return []\n\n ret_lines = ret.readlines()\n\n enc_list = []\n enc = Enclosure()\n\n # Go through all lines looking for the Enclosure identifier line\n for line in range(0, len(ret_lines)):\n if ret_lines[line][0:13] == ' Enclosure':\n\n # Feed the enclosure's block of text to the Enclosure object\n enc.load_from_text(ret_lines[line:line+13])\n\n # Add Enclosure to the enc_list and reset it\n enc_list.append(enc)\n enc = Enclosure()\n\n return enc_list\n\n\ndef 
pd_list(adapter_id):\n\n try:\n ret = megacli('-PdList -a%i' % adapter_id)\n except OSError:\n print 'Failed to get physical device information (MegaCli -PdList)'\n return []\n\n ret_lines = ret.readlines()\n\n pd_list = []\n pd = PhysicalDevice()\n\n # Go through all lines looking for the first line in the disk info\n for line in range(0, len(ret_lines)):\n if ret_lines[line][0:19] == 'Enclosure Device ID':\n\n # Feed disk info to the PhysicalDevice object\n pd.load_from_text(ret_lines[line:line+24])\n\n # Add PhysicalDevice to the pd_list and reset it\n pd_list.append(pd)\n pd = PhysicalDevice()\n\n return pd_list\n\n\ndef vd_list(adapter_id):\n\n try:\n ret = megacli('-LdInfo -Lall -a%i' % adapter_id)\n except OSError:\n print 'Failed to get virtual drive information (MegaCli -LDInfo -Lall)'\n return []\n\n ret_lines = ret.readlines()\n\n vd_list = []\n vd = VirtualDrive()\n\n # Go through all lines looking for the Virtual Disk line\n for line in range(0, len(ret_lines)):\n if ret_lines[line][0:12] == 'Virtual Disk':\n\n # Feed the virtual drive's block of text to the VirtualDrive object\n vd.load_from_text(ret_lines[line:line+13])\n\n # Add VirtualDrive to the vd_list and create a new one\n vd_list.append(vd)\n vd = VirtualDrive()\n\n return vd_list\n\ndef clean_nl(str):\n return str.replace('\\n', '')\n\ndef is_present(str):\n if clean_nl(str) == 'Present':\n return True\n else:\n return False\n\ndef megacli(args):\n cmd = MEGACLI + ' ' + args\n out = Popen(cmd, shell=True, stdout=PIPE).stdout\n return 
out"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1090,"cells":{"__id__":{"kind":"number","value":15032385545341,"string":"15,032,385,545,341"},"blob_id":{"kind":"string","value":"f3065e0565aa23cd7395124fc9103288ae40a691"},"directory_id":{"kind":"string","value":"634bc610b84b3af0d3280106910f050a653b2dc8"},"path":{"kind":"string","value":"/setup.py"},"content_id":{"kind":"string","value":"fe5a56d306ef15385319c830fde7ddeb5ab43136"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"mabotech/mabolab"},"repo_url":{"kind":"string","value":"https://github.com/mabotech/mabolab"},"snapshot_id":{"kind":"string","value":"37aec8d501880ebd398a63d099aebe3f3c46dd94"},"revision_id":{"kind":"string","value":"9d70781e438d5597cbb98e3ff3702658036262a3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-04T17:48:03.790904","string":"2020-06-04T17:48:03.790904"},"revision_date":{"kind":"timestamp","value":"2014-04-05T08:37:07","string":"2014-04-05T08:37:07"},"committer_date":{"kind":"timestamp","value":"2014-04-05T08:37:07","string":"2014-04-05T08:37:07"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_arc
hived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.txt')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires = [\n 'simplejson',\n 'SQLAlchemy', \n 'zope.interface',\n 'flask', \n 'pyro',\n ]\n\nsetup(name='mabolab',\n version='0.0.1',\n description='mabolab',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: \",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='MaboTech',\n license='MIT',\n author_email='mabotech@163.com',\n url='http://www.mabotech.com',\n keywords='mabotech lab lib web',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n test_suite='mabolab',\n install_requires = requires, \n #data_files=[]\n\n )\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1091,"cells":{"__id__":{"kind":"number","value":4947802333073,"string":"4,947,802,333,073"},"blob_id":{"kind":"string","value":"71d3f222bb8b4123cbd5da9bb086934a54af9771"},"directory_id":{"kind":"string","value":"a0a6cdce9eea9700f9f6bc814eb1e87272db8c18"},"path":{"kind":"string","value":"/ciem/apps/homepage/feeds.py"},"content_id":{"kind":"string","value":"33598921d8f3154c58b1c7744bf3726b29dac453"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"gustav0/ciem"},"repo_url":{"kind":"string","value":"https://github.com/gustav0/ciem"},"snapshot_id":{"kind":"string","value":"2c4e56d5e495798153dfbe543367a778e0a0672a"},"revision_id":{"kind":"string","value":"5add67c735313cdd4118793ebe46c840219f490b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T19:26:15.176098","string":"2021-01-22T19:26:15.176098"},"revision_date":{"kind":"timestamp","value":"2013-04-29T21:11:45","string":"2013-04-29T21:11:45"},"committer_date":{"kind":"timestamp","value":"2013-04-29T21:11:45","string":"2013-04-29T21:11:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.contrib.syndication.views import Feed\r\n\r\nfrom ciem.apps.data.models import alimento\r\n\r\nclass archiveFeed(Feed):\r\n\tnombre = 'Archive Feed'\r\n\tdescription = 'Archive Feed'\r\n\tlink = '/archive/'\r\n\r\n\tdef items(self):\r\n\t\treturn alimento.objects.all()\r\n\r\n\tdef item_link(self, item):\r\n\t\treturn '/archive/'\r\n\r\n\tdef item_nombre(self, item):\r\n\t\treturn item.nombre\r\n\r\n\tdef item_description(self, item):\r\n\t\treturn 
'hola'"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1092,"cells":{"__id__":{"kind":"number","value":4690104309711,"string":"4,690,104,309,711"},"blob_id":{"kind":"string","value":"5c5ff6236bd5ca183b8098be62f415a0615517ed"},"directory_id":{"kind":"string","value":"4654171674007e56fd5d56e8c4c99c0c35367f43"},"path":{"kind":"string","value":"/py/auction/time_utils.py"},"content_id":{"kind":"string","value":"b3e0ab528fe2383e6bd72e7e293ca5cdb7180c02"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pscollins/auction"},"repo_url":{"kind":"string","value":"https://github.com/pscollins/auction"},"snapshot_id":{"kind":"string","value":"3ec38932f80834b503216202194c0fe521b297e2"},"revision_id":{"kind":"string","value":"b531459c08b16fa1cc0ad177990a78a9668acff8"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T18:18:36.069637","string":"2021-01-18T18:18:36.069637"},"revision_date":{"kind":"timestamp","value":"2014-08-06T07:19:12","string":"2014-08-06T07:19:12"},"committer_date":{"kind":"timestamp","value":"2014-08-06T07:19:12","string":"2014-08-06T07:19:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_a
rchived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import datetime\nfrom datetime import datetime as dt\nfrom dateutil import tz\nimport time\nimport calendar\nimport re\n\n\nUTC_TZ = tz.gettz('UTC')\nNY_TZ = tz.gettz('America/New_York')\nCHI_TZ = tz.gettz('America/Chicago')\nLOCAL_TZ = tz.tzlocal()\n__SUBSECOND_RESOLUTION__ = 1000000\n__DateRe__ = re.compile(r\"(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)\")\n__CmeDateTimeRe__ = re.compile(r\"(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{3,5})\")\n\ndef start_of_date(year, month, day, tzinfo):\n \"\"\"\n Given date information and timezone, create a timestamp\n \"\"\"\n result = dt(year, month, day, tzinfo=tzinfo).astimezone(UTC_TZ)\n return timestamp_from_datetime(result)\n\ndef timestamp():\n \"\"\"\n Get a current timestamp\n \"\"\"\n now = dt.utcnow()\n return calendar.timegm(now.utctimetuple())*__SUBSECOND_RESOLUTION__ + now.microsecond\n\ndef datetime_from_timestamp(ts):\n \"\"\"\n Given a timestamp, create the corresponding datetime object\n \"\"\"\n return dt.fromtimestamp(float(ts)/__SUBSECOND_RESOLUTION__, UTC_TZ)\n\ndef timestamp_from_datetime(dt):\n \"\"\"\n Given a datetime in utc, create the corresponding timestamp\n \"\"\"\n return calendar.timegm(dt.utctimetuple())*__SUBSECOND_RESOLUTION__ + dt.microsecond\n\ndef timestamp_from_mtime(mt):\n \"\"\"\n Given a file mtime, create the corresponding timestamp\n \"\"\"\n return int(mt*__SUBSECOND_RESOLUTION__)\n\ndef datetime_from_cme_timestamp(ts_str):\n m = __CmeDateTimeRe__.match(ts_str)\n year, mon, day, hr, minutes, sec, millis = m.groups()\n len_millis = len(millis)\n if len_millis == 3:\n micros = int(millis) * 1000\n elif len_millis == 5:\n micros = int(millis) * 10\n elif len_millis == 4:\n micros = int(millis) * 100\n\n return datetime.datetime(int(year),int(mon),int(day),\n int(hr),int(minutes),int(sec),\n micros) \n\ndef timestamp_from_cme_timestamp(ts_str):\n return 
timestamp_from_datetime(datetime_from_cme_timestamp(ts_str))\n\ndef chicago_time(ts):\n \"\"\"\n Given a timestamp (as utc), get the corresponding chicago time\n \"\"\"\n stamp = dt.fromtimestamp(float(ts)/__SUBSECOND_RESOLUTION__, UTC_TZ)\n return stamp.astimezone(CHI_TZ)\n\ndef chicago_time_str(ts):\n return chicago_time(ts).strftime('%H:%M:%S.%f') if ts else 'Not Set'\n\ndef get_date_of_file(fileName):\n \"\"\"\n Given a filename with a date in it (YYYYMMDD), parse out the date\n Return None if no date present\n \"\"\"\n m = __DateRe__.search(fileName)\n if m:\n year, month, day = m.groups()\n return datetime.date(int(year), int(month), int(day))\n else:\n return None\n\ndef get_date_string(d):\n return datetime.date.isoformat(d).replace('-','')\n\nMonday = 0\nTuesday = 1\nWednesday = 2\nThursday = 3\nFriday = 4\nSaturday = 5\nSunday = 6\n\ndef get_previous_weekday(some_date, desired_day = Sunday):\n weekday = some_date.weekday()\n days_back = (desired_day - weekday - 7) if (desired_day > weekday) else (weekday - desired_day)\n return some_date + datetime.timedelta(days_back)\n\nif __name__ == \"__main__\":\n\n\n def make_timestamp(sod, seconds, millis):\n seconds = int(seconds)\n millis = int(millis)\n return sod + seconds*1000000 + millis*1000\n\n def grab_time():\n for i in range(10000):\n ts = timestamp()\n\n import sys\n now = timestamp()\n print timestamp(), timestamp(), timestamp()\n print \"now is \", datetime_from_timestamp(now)\n print \"chicago time is \", chicago_time(now).strftime('%H:%M:%S:%f')\n for i in range(20):\n t = now + i * (60*60*24*356*__SUBSECOND_RESOLUTION__)\n print \"chicago time:\", chicago_time(t), \" => \", int(t), \" vs \", sys.maxint\n\n grab_time()\n\n print chicago_time(1311325200095000)\n\n# sod = timestamp_from_datetime(datetime(2011, 7, 22))\n print \"CH sod\", start_of_date(2011, 7, 22, CHI_TZ)\n print \"NY sod\", start_of_date(2011, 7, 22, NY_TZ)\n\n print \"CH\", chicago_time(start_of_date(2011, 7, 22, CHI_TZ))\n print 
\"NY\", chicago_time(start_of_date(2011, 7, 22, NY_TZ))\n\n dtnow = dt.utcnow()\n print (calendar.timegm(dtnow.utctimetuple())*__SUBSECOND_RESOLUTION__), \"vs\", dtnow.microsecond\n print (calendar.timegm(dtnow.utctimetuple())*__SUBSECOND_RESOLUTION__ + dtnow.microsecond), \"vs\", dtnow.microsecond\n\n print chicago_time(1311321600730000)\n\n print \"Sample timestamp\", datetime_from_cme_timestamp('20120213183040306'),\n print \"in chicago\", chicago_time(timestamp_from_datetime(datetime_from_cme_timestamp('20120213183040306')))\n\n print \"UTC sod\", make_timestamp(start_of_date(2011, 7, 22, CHI_TZ), 28800, 741)\n print \"CH sod\", chicago_time(make_timestamp(start_of_date(2011, 7, 22, CHI_TZ), 28800, 741))\n\n print \"UTC sod\", make_timestamp(start_of_date(2011, 7, 22, NY_TZ), 14400, 730)\n print \"CH sod\", chicago_time(make_timestamp(start_of_date(2011, 7, 22, NY_TZ), 14400, 730))\n\n some_day = datetime.date(2001,1,1)\n print some_day, some_day.weekday()\n print get_previous_weekday(some_day, Saturday)\n\n print get_date_string(some_day)\n\n print chicago_time(timestamp_from_cme_timestamp(\"20081223195210641\"))\n print chicago_time(timestamp_from_cme_timestamp(\"2011102813300000175\"))\n print chicago_time(timestamp_from_cme_timestamp(\"201110281330000017\"))\n\n\n print chicago_time_str(timestamp_from_cme_timestamp(\"20081223195210641\"))\n print chicago_time_str(timestamp_from_cme_timestamp(\"2011102813300000175\"))\n print chicago_time_str(timestamp_from_cme_timestamp(\"20111028032135310\"))\n print 
chicago_time_str(timestamp_from_cme_timestamp(\"201110281330000017\"))\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1093,"cells":{"__id__":{"kind":"number","value":1486058723517,"string":"1,486,058,723,517"},"blob_id":{"kind":"string","value":"cd32a518713f3c2b8e1bfb58077ade58d7df3ebc"},"directory_id":{"kind":"string","value":"0264c852a317b4fd7888af59e5a42103ea8561d0"},"path":{"kind":"string","value":"/apps/words/words/__init__.py"},"content_id":{"kind":"string","value":"9b4985db2a966bc0d22fcb42c07bd824dcc37794"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"yimiqisan/qisan"},"repo_url":{"kind":"string","value":"https://github.com/yimiqisan/qisan"},"snapshot_id":{"kind":"string","value":"9f54998b0f33993ce248da4e6ded121c8ec7d038"},"revision_id":{"kind":"string","value":"8c0f70b23af0e6135e168f85bda5fbee4dffda1e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-05T23:17:30.103940","string":"2020-04-05T23:17:30.103940"},"revision_date":{"kind":"timestamp","value":"2013-06-20T09:57:53","string":"2013-06-20T09:57:53"},"committer_date":{"kind":"timestamp","value":"2013-06-20T09:57:53","string":"2013-06-20T09:57:53"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_op
en_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n__init__.py\n\nCreated by 刘 智勇 on 2013-06-16.\nCopyright (c) 2013 __MyCompanyName__. All rights reserved.\n\"\"\"\n\nfrom flask import Flask\nfrom views import blueprint_site, blueprint_apis, blueprint_weixin\n\napp = Flask(__name__)\napp.register_blueprint(blueprint_site)\napp.register_blueprint(blueprint_apis)\napp.register_blueprint(blueprint_weixin)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1094,"cells":{"__id__":{"kind":"number","value":3178275808409,"string":"3,178,275,808,409"},"blob_id":{"kind":"string","value":"775db518a29ab29cd51f07806d4ce514db4218a1"},"directory_id":{"kind":"string","value":"b1e7181dd2c7379e60ab1fdff954e68b597c6bfb"},"path":{"kind":"string","value":"/test_python_client.py"},"content_id":{"kind":"string","value":"21302fe42227baf50a98c56fad9a13b4d4492959"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"zed-throben/erlang-bertrpc-server"},"repo_url":{"kind":"string","value":"https://github.com/zed-throben/erlang-bertrpc-server"},"snapshot_id":{"kind":"string","value":"68a34cefde47ca4b847ffd5d1469d274245e970d"},"revision_id":{"kind":"string","value":"82e6456e6277e0677322ae4900079c8b1bc6e81d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-15T06:53:35.612505","string":"2020-06-15T06:53:35.612505"},"revision_date":{"kind":"timestamp","value":"2014-07-13T22:02:46","string":"2014-07-13T22:02:46"},"committer_date":{"kind":"timestamp","value":"2014-07-13T22:02:46","string":"2014-07-13T22:02:46"},"github_id":{"kind":"number","value":20970686,"string":"20,970,686"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# easy_install bertrpc\n\nimport bertrpc\nservice = bertrpc.Service('127.0.0.1', 9999)\n\nresponse = service.request('call').lists.reverse( [1, 2,3] )\nprint( response 
)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1095,"cells":{"__id__":{"kind":"number","value":10969346509728,"string":"10,969,346,509,728"},"blob_id":{"kind":"string","value":"ed338cb090b88bab449a47294b435ded9515ffbf"},"directory_id":{"kind":"string","value":"81fc357f0849890c8ee3232d1202e6b8e1a59f64"},"path":{"kind":"string","value":"/autobt/manager.py"},"content_id":{"kind":"string","value":"0cc31f6c33232d31b8b473273a6e53cc0fda7a4a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"patrick-lu/auto"},"repo_url":{"kind":"string","value":"https://github.com/patrick-lu/auto"},"snapshot_id":{"kind":"string","value":"a3f436818518e38d956ed3a1adf36d140c993276"},"revision_id":{"kind":"string","value":"bfd58c89c0c15428f066f630fcee294b10cd3c01"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-06T04:14:04.343237","string":"2020-06-06T04:14:04.343237"},"revision_date":{"kind":"timestamp","value":"2012-07-05T14:04:15","string":"2012-07-05T14:04:15"},"committer_date":{"kind":"timestamp","value":"2012-07-05T14:04:15","string":"2012-07-05T14:04:15"},"github_id":{"kind":"number","value":4414756,"string":"4,414,756"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_langu
age":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport zmq\n\nhost=\"127.0.0.1:6000\"\ncontext = zmq.Context()\nsocket = context.socket(zmq.REQ)\nsocket.connect(\"tcp://\"+host)\n\n#for i in range(10):\nmsg = {'cmd':'start_reply'}\nmsg = {'cmd':'start_work'}\n\t#msg = json.dumps(msg)\nsocket.send_json(msg)\nmsg_in = socket.recv();\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1096,"cells":{"__id__":{"kind":"number","value":7413113602533,"string":"7,413,113,602,533"},"blob_id":{"kind":"string","value":"4efd226e067cf648f2f622f19c8540092e5f2a85"},"directory_id":{"kind":"string","value":"28b13e5fff19c1fb83bc0e9ec9b00651241afc2e"},"path":{"kind":"string","value":"/openprofile/objects/__init__.py"},"content_id":{"kind":"string","value":"74526b90f73b137da3f3c86cdd9d5fbb3b64076b"},"detected_licenses":{"kind":"list like","value":["LicenseRef-scancode-warranty-disclaimer","LicenseRef-scancode-proprietary-license","LicenseRef-scancode-unknown-license-reference","CC-BY-NC-ND-3.0"],"string":"[\n \"LicenseRef-scancode-warranty-disclaimer\",\n \"LicenseRef-scancode-proprietary-license\",\n \"LicenseRef-scancode-unknown-license-reference\",\n 
\"CC-BY-NC-ND-3.0\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"koalalorenzo/OpenProfile"},"repo_url":{"kind":"string","value":"https://github.com/koalalorenzo/OpenProfile"},"snapshot_id":{"kind":"string","value":"7a69aae26e936649a91ffee8339b69fda56a6a2d"},"revision_id":{"kind":"string","value":"a292f0c56b1d90353eff93e825d7d2346f73c6ec"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T14:34:01.730900","string":"2021-01-18T14:34:01.730900"},"revision_date":{"kind":"timestamp","value":"2013-03-09T20:33:16","string":"2013-03-09T20:33:16"},"committer_date":{"kind":"timestamp","value":"2013-03-09T20:33:16","string":"2013-03-09T20:33:16"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from openprofile.objects.Connection import Connection\nfrom openprofile.objects.Page import Page, PageNotFound\nfrom openprofile.objects.Message import Message\nfrom openprofile.objects.Profile import Profile, 
AdminNotFound\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1097,"cells":{"__id__":{"kind":"number","value":11570641906287,"string":"11,570,641,906,287"},"blob_id":{"kind":"string","value":"965a1951c45593d4c8ccdf03de6dab98b1a2cd86"},"directory_id":{"kind":"string","value":"49013a6493b803af396f6dd7645a360d2807e80b"},"path":{"kind":"string","value":"/src/neuroutils/__init__.py"},"content_id":{"kind":"string","value":"0e2b5bacf6ed2e23a77db43a63e4a1f36b8df236"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"lixiaolong19890207/neuroutils"},"repo_url":{"kind":"string","value":"https://github.com/lixiaolong19890207/neuroutils"},"snapshot_id":{"kind":"string","value":"2c1caf5b52283fe7652fa7aa2a158378aa69b620"},"revision_id":{"kind":"string","value":"85151c30ad43745352c6dc641707d42b867a5adf"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-28T08:25:06.613585","string":"2021-05-28T08:25:06.613585"},"revision_date":{"kind":"timestamp","value":"2012-02-27T12:24:51","string":"2012-02-27T12:24:51"},"committer_date":{"kind":"timestamp","value":"2012-02-27T12:24:51","string":"2012-02-27T12:24:51"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},
"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from nipype.utils.config import config\nimport matplotlib\nmatplotlib.use(config.get(\"execution\", \"matplotlib_backend\"))\nfrom vis import Overlay, PsMerge, Ps2Pdf, PlotRealignemntParameters\nfrom threshold import ThresholdGGMM, CreateTopoFDRwithGGMM, ThresholdGMM, ThresholdFDR\nfrom simgen import SimulationGenerator\nfrom resampling import CalculateNonParametricFWEThreshold, CalculateProbabilityFromSamples, CalculateFDRQMap\nfrom bootstrapping import BootstrapTimeSeries, PermuteTimeSeries\nfrom bedpostx_particle_reader import Particle2Trackvis\nfrom annotate_tracks import AnnotateTracts\nfrom icc import ICC\n\nimport numpy as np\n\ndef estimate_fdr_and_fnr(true_pattern, exp_result):\n false_positives = sum(exp_result[true_pattern != 1] != 0)\n false_negatives = sum(exp_result[true_pattern != 0] == 0)\n all_positives = np.sum(exp_result != 0)\n all_negatives = np.sum(exp_result == 0)\n if all_positives == 0:\n fdr = 0\n else:\n fdr = float(false_positives)/float(all_positives)\n \n if all_negatives == 0:\n fnr = 0\n else:\n fnr = float(false_negatives)/float(all_negatives)\n return (fdr, fnr)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1098,"cells":{"__id__":{"kind":"number","value":19473381757303,"string":"19,473,381,757,303"},"blob_id":{"kind":"string","value":"5d6d7e65c22043998ad7c0d233b4276525d4c115"},"directory_id":{"kind":"string","value":"8636807e06d87e4a190edd8e3e6f701b844cfce4"},"path":{"kind":"string","value":"/turkey.py"},"content_id":{"kind":"string","value":"3252d435aa0d08bba5e57a1edc4ef6b1bdb2a9ec"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dbenamy/agent-plugins"},"repo_url":{"kind":"string","value":"https://github.com/dbenamy/agent-plugins"},"snapshot_id":{"kind":"string","value":"5c3c6eb79e77c6d683ed3174bdb532528a97e89d"},"revision_id":{"kind":"string","value":"5b8a8c50ed4c11e6403f08e106adb8ef52b4572e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T17:20:56.846554","string":"2021-01-15T17:20:56.846554"},"revision_date":{"kind":"timestamp","value":"2011-02-14T18:17:53","string":"2011-02-14T18:17:53"},"committer_date":{"kind":"timestamp","value":"2011-02-14T18:17:53","string":"2011-02-14T18:17:53"},"github_id":{"kind":"number","value":1131027,"string":"1,131,027"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# _<_< > \n# ____________/_/ _/\n# _/ turkey.py _/ /_\n# / custom / \\ >\n# ( plugin \\_____//\n# \\________________/ \n# \n# First used with a Backwoods smoker to monitor\n# the temp of a turkey during cook time. You will\n# need to adjust the temp thresholds to match your\n# specific cooking requirements. Enjoy! 
\n#\n# ~Team Cloudkick, Thanksgiving 2010\n\nimport time\nimport struct\nimport sys\n\nldusb = file(\"/dev/ldusb0\")\n\ntime.sleep(0.5)\n\n# This reads the payload off of the Go!Temp USB drive\npkt = ldusb.read(8)\nparsed_pkt = list(struct.unpack(\" 200 and f < 300:\n status = 'ok'\nelse:\n status = 'err'\nprint 'status %s temp at %d' % (status, f)\nprint 'metric temp int %d' % (f)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1099,"cells":{"__id__":{"kind":"number","value":4054449131639,"string":"4,054,449,131,639"},"blob_id":{"kind":"string","value":"1664d68bdf35734e13e114e8edfd50d290054319"},"directory_id":{"kind":"string","value":"8b9f896d8b9c457dcbb1032659c15717385c6711"},"path":{"kind":"string","value":"/pyarff/arff/ArffFile.py"},"content_id":{"kind":"string","value":"d3be7584008e896212808337382afa6e7267cfa2"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"nybblr/pyarff"},"repo_url":{"kind":"string","value":"https://github.com/nybblr/pyarff"},"snapshot_id":{"kind":"string","value":"bac42c7dace25ffbf82f657970e5733006a17a85"},"revision_id":{"kind":"string","value":"ae1f7b0fd0d4d82fea17ace8192717062d578149"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T03:34:21.844169","string":"2021-01-20T03:34:21.844169"},"revision_date":{"kind":"timestamp","value":"2013-03-05T02:32:58","string":"2013-03-05T02:32:58"},"committer_date":{"kind":"timestamp","value":"2013-03-05T02:32:58","string":"2013-03-05T02:32:58"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"class ArffAttribute(object):\n \"\"\"\n Name can be any alphanumerical value\n DataType can be any of STRING, NUMERIC or NOMINAL\n \"\"\"\n\n def __init__(self, index, name, dataType):\n self.index = index\n self.name = name\n self.isContinuous = False\n self.dataType = dataType.strip()\n self.values = None\n self.valuesCount = {}\n self.missing = 0\n self.mean = 0\n self.median = 0\n self.mode = None\n self.sum = 0\n self.min = None\n self.max = None\n self.stdDev = 0\n self.nbrOfBins = 0\n\n if self.dataType in [\"NUMERIC\", \"INTEGER\", \"REAL\"]:\n self.isContinuous = True\n\n elif dataType not in [\"NUMERIC\", \"INTEGER\", 
\"REAL\", \"STRING\"]:\n self.values = self.dataType[1:-1].split(\",\")\n self.nbrOfBins = len(self.values)\n self.dataType = \"NOMINAL\"\n for i in range(self.nbrOfBins):\n self.values[i] = self.values[i].strip()\n\n def getValues(self):\n values = []\n for k in self.valuesCount.iterkeys():\n values.append(k)\n return values\n\n def __str__(self):\n output = \"Name: %s, DataType: %s, IsContinuos: %d, Missing values: %d\" % (\n self.name, self.dataType, self.isContinuous, self.missing)\n\n if self.isContinuous == 1:\n output += \"\\n\\tMean: %.3f\" % self.mean\n output += \"\\n\\tMedian: %.3f\" % self.median\n output += \"\\n\\tMax: %.3f\" % self.max\n output += \"\\n\\tMin: %.3f\" % self.min\n\n else:\n output += \", Mode: %s\" % self.mode\n for key, value in self.valuesCount.items():\n output += \"\\n\\tValue %s: %4d\" % (key, value)\n\n return output\n\n\nclass ArffRecord(object):\n def __init__(self, values):\n self.values = []\n if isinstance(values, str):\n self.values = values.strip().split(\",\")\n else:\n self.values = values\n\n def __str__(self):\n return str(self.values)\n\n\nclass ArffFile(object):\n def __init__(self):\n self.relationName = \"\"\n self.records = []\n self.attributes = []\n self.countOfAttributes = 0\n self.countOfRecords = 0\n\n def getNameForAttribute(self, index):\n return self.attributes[index].name\n\n def getIndexForAttribute(self, name):\n for attr in self.attributes:\n if attr.name == name:\n return attr.index\n return -1\n\n def handleMissingValues(self, strategy):\n if strategy == DISCARD_RECORD:\n originalRecords = self.records # keep a copy\n self.records = [] # clear array\n for record in originalRecords:\n discard = False\n for attr in self.attributes:\n if record.values[attr.index] == \"?\":\n discard = True\n if not discard:\n self.records.append(record)\n elif strategy == MOST_PROBABLE:\n for record in self.records:\n for attr in self.attributes:\n if record.values[attr.index] == \"?\":\n if attr.isContinuous:\n 
record.values[attr.index] = attr.median\n else:\n record.values[attr.index] = attr.mode\n else:\n raise Exception(\"Unknown strategy\")\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":10,"numItemsPerPage":100,"numTotalItems":42509,"offset":1000,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjMxOTMzOSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU2MzIyOTM5LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.DYP14my-nP4lZEYOz-F41TQTFJw0N9Vp9OCfX6He1xwjNGpWmNLJsPFKvM-9bhZU-sdvGnEJPW-nAVGcWidACg","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
__id__
int64
3.09k
19,722B
blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
256
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
3 values
repo_name
stringlengths
5
109
repo_url
stringlengths
24
128
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
42
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
6.65k
581M
star_events_count
int64
0
1.17k
fork_events_count
int64
0
154
gha_license_id
stringclasses
16 values
gha_fork
bool
2 classes
gha_event_created_at
timestamp[ns]
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_size
int64
0
5.76M
gha_stargazers_count
int32
0
407
gha_forks_count
int32
0
119
gha_open_issues_count
int32
0
640
gha_language
stringlengths
1
16
gha_archived
bool
2 classes
gha_disabled
bool
1 class
content
stringlengths
9
4.53M
src_encoding
stringclasses
18 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
year
int64
1.97k
2.01k
7,499,012,927,292
245583a36930123944eef662dd479631acf8b6ec
ed9d5731ce65e054241b870615c54548c1aa3148
/setup.py
b7434e36125929141b7fe1394db2d33ca650cfb8
[]
no_license
prestontimmons/django-dev-dashboard
https://github.com/prestontimmons/django-dev-dashboard
b16224f3c8d07e2c8f2cb9230b5c35159fdf9a23
4b83c6f323a90ccd6103bc59a509926d49db98db
refs/heads/master
2020-01-23T22:48:17.046044
2011-05-10T21:01:41
2011-05-10T21:01:41
1,729,023
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from setuptools import setup setup( name='django-dev-dashboard', version='1.0', long_description=__doc__, packages=['dashboard'], include_package_data=True, zip_safe=False, install_requires=['Django>=1.3'] )
UTF-8
Python
false
false
2,011
1,434,519,083,984
392f53d64048f696a0e1a69d02e79fd19bcf733d
0fb1c4683c336429563e5f4efaaf0984149082aa
/002A. Winner.py
79179edd4073c29f3e9a5e009beda2cb1ce43ed7
[]
no_license
vsg/codeforces
https://github.com/vsg/codeforces
e4ae8ae0b059e6c9ae6371a15f4e2d694cfd64cb
f9059a1e17fa44a8f680852e8ebb1fe9c9fccab5
refs/heads/master
2020-03-30T08:02:15.222567
2011-06-23T18:11:34
2011-06-23T18:11:34
1,943,206
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
a = {} b = [] for _ in xrange(int(raw_input())): n, s = raw_input().split() if n in a: a[n] += int(s) else: a[n] = int(s) b += [(n, a[n])] ms = max(a.values()) for n, s in b: if a[n] == ms and s >= ms: print n break
UTF-8
Python
false
false
2,011
14,645,838,512,224
82dc19b3a323fbcbaa6f1ee770eaede5b233f22f
44cf5f9031727a59112622abf9bbec0d2773ae02
/Workroom/4_Jaehyun/combine_w_loading/apps/models.py
d621b3502056c317be396a8b02cb0c6b37cc8f0d
[]
no_license
DongsikHan/Calendar
https://github.com/DongsikHan/Calendar
8b5ada57d307fb62c51f8283f6dfecc364fc0ecc
6b9b115034172e21038174069d4e3a2e2e5b3615
refs/heads/master
2021-05-16T03:17:00.661325
2014-09-28T08:25:03
2014-09-28T08:25:03
23,070,464
0
1
null
false
2020-07-23T23:13:11
2014-08-18T12:32:45
2014-09-03T09:49:33
2014-09-28T08:25:14
14,435
0
1
2
Python
false
false
""" models.py """ from apps import db def dump_datetime(value): """Deserialize datetime object into string form for JSON processing.""" if value is None: return None return [value.strftime("%Y-%m-%d"), value.strftime("%H:%M:%S")] class Event(db.Model): id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(255)) title_cal = db.Column(db.String(255)) content = db.Column(db.Text()) host = db.Column(db.String(255)) category_char = db.Column(db.String(255)) category_host = db.Column(db.String(255)) date_created = db.Column(db.DateTime(timezone=True), default=db.func.now()) date_start = db.Column(db.DateTime(timezone=True)) date_end = db.Column(db.DateTime(timezone=True)) location = db.Column(db.String(255)) link = db.Column(db.String(255)) poster = db.Column(db.String(255)) contact = db.Column(db.String(255)) contact_open = db.Column(db.Boolean()) acceptance = db.Column(db.Boolean()) # http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask @property def serialize(self): """Return object data in easily serializeable format""" return { 'title': self.title, 'title_cal': self.title_cal, 'date_start': dump_datetime(self.date_start), 'date_end': dump_datetime(self.date_end) }
UTF-8
Python
false
false
2,014
3,238,405,344,548
f29f98ab10555f10bc55cc7298c23aa063f414f7
c80b3cc6a8a144e9858f993c10a0e11e633cb348
/components/ally/ally/notifier/impl/processor/register.py
033a8e8c989ea928a18f75643bbba4c97cfe92a2
[]
no_license
cristidomsa/Ally-Py
https://github.com/cristidomsa/Ally-Py
e08d80b67ea5b39b5504f4ac048108f23445f850
e0b3466b34d31548996d57be4a9dac134d904380
refs/heads/master
2021-01-18T08:41:13.140590
2013-11-06T09:51:56
2013-11-06T09:51:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on Sep 12, 2013 @author: mihaigociu @package: ally base @copyright: 2012 Sourcefabric o.p.s. @license: http://www.gnu.org/licenses/gpl-3.0.txt @author: Mihai Gociu Registering the listeners for the file notifier. ''' import re from ally.container.ioc import injected from ally.design.processor.attribute import defines from ally.design.processor.handler import HandlerProcessor from ally.design.processor.context import Context from ally.support.util_spec import IDo import logging from urllib.parse import urlsplit, urljoin # -------------------------------------------------------------------- log = logging.getLogger(__name__) # -------------------------------------------------------------------- class ListenerRegister(Context): ''' The file system item context. ''' # ---------------------------------------------------------------- Defined uri = defines(str, doc=''' @rtype: string Pattern for the paths that this listener is interested in. ''') doMatch = defines(IDo, doc=''' @rtype: callable(item) -> boolean Matches the item path against the paths accepted by the listener. To be used for folders. @param item: Context The item to match. ''') doMatchResource = defines(IDo, doc=''' @rtype: callable(item) -> boolean Matches the item path against the paths accepted by the listener. To be used for resources (like files) not folders. @param item: Context The item to match. ''') doOnContentCreated = defines(IDo, doc=''' @rtype: callable(URI, content) Is called when an item for this listener is created. Should handle stream closing. @param URI: string Pattern for paths accepted by the listener. @param content: stream Stream with the content of the resource. ''') doOnContentChanged = defines(IDo, doc=''' @rtype: callable(URI, content) Is called when an item for this listener is changed. Should handle stream closing. @param URI: string Pattern for paths accepted by the listener. @param content: stream Stream with the content of the resource. 
''') doOnContentRemoved = defines(IDo, doc=''' @rtype: callable(URI) Is called when an item for this listener is deleted. @param URI: string Pattern for paths accepted by the listener. @param content: stream Stream with the content of the resource. ''') class Register(Context): ''' The register context. ''' # ---------------------------------------------------------------- Defines listeners = defines(list, doc=''' @rtype: list[Context] The listeners for the items tree. ''') # -------------------------------------------------------------------- @injected class RegisterListeners(HandlerProcessor): ''' Implementation that provides the file system polling and notifying. ''' patterns = list # The URI patterns that this register is providing listeners for. def __init__(self): assert isinstance(self.patterns, list), 'Invalid patterns %s' % self.patterns super().__init__() def process(self, chain, register:Register, Listener:ListenerRegister, **keyargs): ''' @see: HandlerProcessor.process Register the listeners. ''' assert isinstance(register, Register), 'Invalid register %s' % register if register.listeners is None: register.listeners = [] for pattern in self.patterns: assert isinstance(pattern, str), 'Invalid pattern %s' % pattern # create the listener listener = Listener() assert isinstance(listener, ListenerRegister), 'Invalid listener %s' % listener listener.uri = pattern listener.doMatch, listener.doMatchResource = self.createMatch(pattern) listener.doOnContentCreated = self.doOnContentCreated listener.doOnContentChanged = self.doOnContentChanged listener.doOnContentRemoved = self.doOnContentRemoved register.listeners.append(listener) def createMatch(self, uriPattern): ''' Create the match for the provided items. ''' uriListener = urlsplit(uriPattern) pathListener = (uriListener.netloc + uriListener.path).split('/') # TODO: filename regex patterns = [re.compile(p) for p in ['\/'.join(pathListener[:i+1]).replace('*', '[a-zA-Z0-9_. 
\-]+')+'$' \ for i in range(len(pathListener))] if len(p) > 1] def doMatch(uri): uriItem = urlsplit(uri) pathItem = uriItem.netloc + uriItem.path if uriListener.scheme != uriItem.scheme: return False for pattern in patterns: if pattern.match(pathItem): return True return False def doMatchResource(uri): uriItem = urlsplit(uri) pathItem = uriItem.netloc + uriItem.path if uriListener.scheme != uriItem.scheme: return False if not patterns[-1].match(pathItem): return False return True return doMatch, doMatchResource #------------------------------------------------------------------Methods for listeners def doOnContentCreated(self, uri, content): ''' Parse the file or whatever. ''' self.doOnContentChanged(uri, content) def doOnContentChanged(self, uri, content): ''' Parse the file (again) or whatever. ''' assert log.debug('Content changed for URI: %s' % uri) or True def doOnContentRemoved(self, uri): ''' Do nothing for now. ''' assert log.debug('Content deleted for URI: %s' % uri) or True self.doOnContentChanged(uri, None)
UTF-8
Python
false
false
2,013
2,757,369,011,159
3e14c40eb0bda8bf334eb9b4ae226c4ed8555ab8
06f4438e0bb23e965c57506c0e916603647b5ca6
/strange-add/testuj.py
198fcb2b99474a797adedc36eb6c72876d7240d1
[]
no_license
Ciemny/PSI
https://github.com/Ciemny/PSI
cf1852b21d1afecb5edaf2df39328e64eded409f
63e8f647462e54cc3e9dbb406e14a898918e9b18
refs/heads/master
2021-01-02T08:21:35.464146
2013-05-22T00:07:11
2013-05-22T00:07:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import solution from checker import Checker def checkStr(fun, args): return str(solution.strange_number(args)) def checkAdd(fun, args): A = solution.strange_number(args[0]) B = solution.strange_number(args[1]) return str(A+B) def checkPositive(fun, args): A = solution.strange_number(args) return A[1] def checkNegative(fun, args): A = solution.strange_number(args) return A[-1] def main(): checker = Checker() checker.addTest(checkStr, 253, "253", "str(strange_number(253))") checker.addTest(checkAdd, [12, 28], "1228", "str(A+B)") checker.addTest(checkPositive, 5, 5, "A[1]") checker.addTest(checkNegative, 10, -10 , "A[-1") checker.doTesting(solution.strange_number) if __name__ == '__main__': main()
UTF-8
Python
false
false
2,013
3,470,333,608,081
5f09ada0053b72c7bfb5d36c25557e0aec841b1d
d1b8fccf705570fdfe0d34fa899719ae7c1df5b5
/SAT_implementation/bool_formulas.py
e76946471442ab39055faf64451d529a77562c82
[]
no_license
KePcA/LVR-sat
https://github.com/KePcA/LVR-sat
8c87f3c885c3a9f7866365373973327d57aed0be
bb4e876919bb956b75c442d528f3892553f1ee6f
refs/heads/master
2021-01-18T17:19:23.738128
2014-05-02T15:08:02
2014-05-02T15:08:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding:utf-8 """ Implementation of classes for the representation of a Boolean formula. """ ########################################## Class for representation of FALSE ########################################### class Fls(): def __init__(self): pass def __repr__(self): """ Representation of FALSE. """ return "F" def __eq__(self, other): """ Equality check. """ return isinstance(other, Fls) def __ne__(self, other): """ Not equality check. """ return not isinstance(other, Fls) def __lt__(self, other): """ Order check """ return True def evaluate(self, dict): """ Always evaluates to False. """ return False def replace(self, dict): """ Nothing to replace. """ return self ########################################### Class for representation of TRUE ########################################### class Tru(): def __init__(self): pass def __repr__(self): """ Representation of TRUE. """ return "T" def __eq__(self, other): """ Equality check. """ return isinstance(other, Tru) def __ne__(self, other): """ Not equality check. """ return not isinstance(other, Tru) def __lt__(self, other): """ Order check """ if isinstance(other, Tru): return True else: return not isinstance(other, Fls) def evaluate(self, dict): """ Always evaluates to True. """ return True def replace(self, dict): """ Nothing to replace. """ return self ######################################### Class for representation of VARIABLE ######################################### class Var: def __init__(self, name): """ Argument is the name of the variable. """ self.name = name def __repr__(self): """ Representation of a VARIABLE. """ return self.name def __eq__(self, other): """ Equality check. """ return isinstance(other, Var) and other.name == self.name def __ne__(self, other): """ Not equality check. 
""" return not isinstance(other, Var) or self.name != other.name def __lt__(self, other): """ Order check """ if isinstance(other, Var): return self.name < other.name else: return not isinstance(other, Tru) and not isinstance(other, Fls) def evaluate(self, dict): """ Returns the variable contained in the specified dictionary or None if the variable doesn't exist. """ return dict.get(self.name).evaluate(dict) def replace(self, dict): """ Replaces all of the occurrences of the Vars that are represented by the keys in the values dictionary with the value located in the dictionary (key - name of Var, value - value of Var). If a Var with the name isn't defined with a key in the dictionary, the Var isn't replaced. """ value = dict.get(self.name) if value is None: return self else: return value ######################################### Class for representation of NEGATION ######################################### class Not(): def __init__(self, formula): """ Argument is a representation of another boolean formula. """ self.formula = formula def __repr__(self): """ Representation of NOT. """ return "¬" + repr(self.formula) def __eq__(self, other): """ Equality check. """ return isinstance(other, Not) and self.formula == other.formula def __ne__(self, other): """ Not equality check. """ return not isinstance(other, Not) or self.formula != other.formula def __lt__(self, other): """ Order check """ if isinstance(other, Not): return self.formula < other.formula else: return not isinstance(other, Var) and not isinstance(other, Tru) and not isinstance(other, Fls) def evaluate(self, dict): """ Returns the negation of our formula which value we get from the dictionary of variables' values (dict). """ return not self.formula.evaluate(dict) def replace(self, dict): """ Replaces all of the occurrences of the Vars that are represented by the keys in the values dictionary with the value located in the dictionary (key - name of Var, value - value of Var). 
If a Var with the name isn't defined with a key in the dictionary, the Var isn't replaced. """ return Not(self.formula.replace(dict)) ########################################### Class for representation of AND ############################################ class And(): def __init__(self, formulas): """ Argument formulas represents list of formulas which are combined to a single formula by conjunctions. """ self.formulas = formulas def __repr__(self): """ Representation of AND. """ string = "" for i in xrange(len(self.formulas) - 1): string += repr(self.formulas[i]) string += " ^ " string += repr(self.formulas[len(self.formulas) - 1]) return "(" + string + ")" def __eq__(self, other): """ Equality check. """ return isinstance(other, And) and self.formulas == other.formulas def __ne__(self, other): """ Not equality check. """ return not isinstance(other, And) or self.formulas != other.formulas def __lt__(self, other): """ Order check """ if isinstance(other, And): return self.formulas < other.formulas else: return not isinstance(other, Not) and not isinstance(other, Var) and not isinstance(other, Tru) and not isinstance(other, Fls) def evaluate(self, dict): """ Returns the conjunction of values of all the formulas being present in the list. We stop as soon as one of the formula is false. """ for formula in self.formulas: if formula.evaluate(dict) is False: return False return True def replace(self, dict): """ Replaces all of the occurrences of the Vars that are represented by the keys in the values dictionary with the value located in the dictionary (key - name of Var, value - value of Var). If a Var with the name isn't defined with a key in the dictionary, the Var isn't replaced. 
""" return And([x.replace(dict) for x in self.formulas]) def isEmpty(self): """ If conjunction contains no variables """ if self.formulas: return False else: return True ############################################ Class for representation of OR ############################################ class Or(): def __init__(self, formulas): """ Argument formulas represents list of formulas which are combined to a single formula by disjunctions. """ self.formulas = formulas def __repr__(self): """ Representation of OR. """ string = "" for i in xrange(len(self.formulas) - 1): string += repr(self.formulas[i]) string += " ∨ " string += repr(self.formulas[len(self.formulas) - 1]) return "(" + string + ")" def __eq__(self, other): """ Equality check. """ return isinstance(other, Or) and self.formulas == other.formulas def __ne__(self, other): """ Not equality check. """ return not isinstance(other, Or) or self.formulas != other.formulas def __lt__(self, other): """ Order check """ return isinstance(other, Or) and self.formulas < other.formulas def evaluate(self, dict): """ Returns the disjunction of values of all the formulas being present in the list. We stop as soon as one of the formula is true. """ for formula in self.formulas: if formula.evaluate(dict) is True: return True return False def replace(self, dict): """ Replaces all of the occurrences of the Vars that are represented by the keys in the values dictionary with the value located in the dictionary (key - name of Var, value - value of Var). If a Var with the name isn't defined with a key in the dictionary, the Var isn't replaced. """ return Or([x.replace(dict) for x in self.formulas]) def isEmpty(self): """ If disjunction contains no variables """ if self.formulas: return False else: return True
UTF-8
Python
false
false
2,014
12,532,714,578,492
169b3a5120e46919bf1bcb841c3e450ebc863a41
84bd24dc12f677620cb9c99e70db06a636257c1a
/energy_level_plotter.py
c5eec8ccbff4b2a3fb5cc1e6138dc8c5a90061ce
[]
no_license
gendry/energy_level_diagram
https://github.com/gendry/energy_level_diagram
599987589e58f3a6f014f77df4e4c0dbf82342d0
d6914e809175d27ed0b58efb906bf6542f820b9d
refs/heads/master
2020-05-19T08:53:28.710396
2014-11-28T20:18:10
2014-11-28T20:18:10
27,280,772
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
#for the plots from pylab import * #for ordered dicts import collections #list of all the materials and workfunctions from full_materials_list_mod import full_materials_list #colours for the plots, max number of plots currently limited to number of values in this list e.g. 11 colours = ['#d3d3d3', '#bdbdbd', '#a8a8a8', '#939393', '#7e7e7e', '#696969', '#545454', '#3f3f3f', '#2a2a2a', '#151515', '#000000'] #Make a dictionary of all the materials, doesn't really need to be an ordered one all_materials = collections.OrderedDict(full_materials_list) #Empty list for all the material to be plotted list_materials = [] #Enter the name of the material to be plotted or type exit to end entering new materials while True: item = str(raw_input('Next material name or exit:')) if item == 'exit': break #add each material to the list list_materials.append(item) #make a new dictionary using the list of materials names to match the keys in the full dict materials = collections.OrderedDict((k, all_materials[k]) for k in list_materials if k in all_materials) #the max energy on the graph maximum = 0 #the min enery on the graph rounded down from the lowest value in the materials selected minimum = int(round((min(l[0] for l in materials.values())) - 1)) ticks =[] tick_labels = [] fig, ax = plt.subplots() i = 1 #Go through each material and plot HOMO LUMO or wf for each for key, value in materials.iteritems(): #Choose a new colour from the list for each colour = colours[i-1] lumo = max(value) homo = min(value) #if its a wf just plot one if homo == lumo: ax.broken_barh([(i-0.5,1)] , (homo, minimum-homo), facecolors=colour) #otherwise plot both HOMO and LUMO else: ax.broken_barh([(i-0.5,1)] , (homo, minimum-homo), facecolors=colour) ax.broken_barh([(i-0.5,1)] , (lumo, maximum-lumo), facecolors=colour) ticks.append(i) tick_labels.append(key) i = i + 1 ax.set_title('Energy Level Diagram') ax.set_xlim(0,i) ax.set_ylim(minimum,0) ax.set_ylabel('Energy Level (eV)') ax.set_xticks(ticks) 
ax.set_xticklabels(tick_labels) ax.grid(True) plt.show()
UTF-8
Python
false
false
2,014
10,153,302,717,811
cc3421b2a6a319bbe102d79d831ae18e950764f6
a079c4229bef89ca36987876cb970d1e526b0db2
/iaas/ext/login.py
6650aab94944f0198da241be716664041405b20a
[]
no_license
banama/snippet
https://github.com/banama/snippet
fdc3070ac3f929ba0208db53c9fd23af6e192daa
02ca370f0117ffa11e88ac9fcd6f8bad4b999efd
refs/heads/master
2015-08-10T21:01:06
2014-11-17T14:44:14
2014-11-17T14:44:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding:utf8 from flask import session, url_for, redirect from functools import wraps def require_visitor(func): """Require visitor""" @wraps(func) def decorator(*args, **kw): if 'username' in session: return redirect(url_for('site.index')) return func(*args, **kw) return decorator def require_login(func): """Require login""" @wraps(func) def decorator(*args, **kw): if 'username' not in session: return redirect(url_for('user.login')) return func(*args, **kw) return decorator def login(username, id, email): session['username'] = username session['id'] = id session['email'] = email def logout(): session.pop('username', None) session.pop('email', None) session.pop('id', None)
UTF-8
Python
false
false
2,014
9,354,438,791,476
552e2aab83a8334fcfcdf86a7f5ba539d026c26e
7d8d9f60b14d688a657e5727c089463232dec758
/test_makeLOVE.py
6174119f63d7d9d8f5d7c283e483d77992e78319
[]
no_license
JR-Carroll/makeLOVE
https://github.com/JR-Carroll/makeLOVE
d19f07db2fab50e8b4161542e948e8ba16680800
50e444071aad32f269e22a2bdc04d5b0a40854ac
refs/heads/master
2021-05-26T15:23:49.501086
2013-09-11T11:31:21
2013-09-11T11:31:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import unittest class TestImports(unittest.TestCase): def test_print_function(self): pass def test_print_function(self): pass def test_print_function(self): pass class TestZipFiles(unittest.TestCase): def test_default_init_values(self): pass def test_except_GetOptError(self): pass def test_general_execpt(self): pass def test_mthd_zip_all(self): pass def test_mthd_print_files(self): pass def test_mthd_print_help(self): pass def test_mthd_safe_for_zipping(self): pass def test_mthd_safe_for_zipping(self): pass def test_status_of_run(self): pass
UTF-8
Python
false
false
2,013
8,658,654,070,616
10ef90c95c763d670ce573158e0a1046737c5fac
0a7d60f0f79fa6d00b3d99e42f4eb5bd84b5d147
/teuthology/schedule.py
a147e3d16458b85a63ec813bf29f86ac042c181f
[]
no_license
yehudasa/teuthology
https://github.com/yehudasa/teuthology
792bfdc8078be2e983b2fdf9d1b6744e92d99a92
59ee17dc1944d35966693369edff785c5e73dc1d
refs/heads/master
2021-01-15T20:57:11.704086
2014-05-30T14:59:36
2014-05-30T14:59:36
20,341,792
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import yaml import teuthology.beanstalk from teuthology.misc import get_user from teuthology.misc import read_config from teuthology import report def main(ctx): if ctx.owner is None: ctx.owner = 'scheduled_{user}'.format(user=get_user()) read_config(ctx) beanstalk = teuthology.beanstalk.connect() tube = ctx.worker beanstalk.use(tube) if ctx.show: for job_id in ctx.show: job = beanstalk.peek(job_id) if job is None and ctx.verbose: print 'job {jid} is not in the queue'.format(jid=job_id) else: print '--- job {jid} priority {prio} ---\n'.format( jid=job_id, prio=job.stats()['pri']), job.body return if ctx.delete: for job_id in ctx.delete: job = beanstalk.peek(job_id) if job is None: print 'job {jid} is not in the queue'.format(jid=job_id) else: job.delete() name = yaml.safe_load(job.body).get('name') if name: report.try_delete_jobs(name, job_id) return # strip out targets; the worker will allocate new ones when we run # the job with --lock. if ctx.config.get('targets'): del ctx.config['targets'] job_config = dict( name=ctx.name, last_in_suite=ctx.last_in_suite, email=ctx.email, description=ctx.description, owner=ctx.owner, verbose=ctx.verbose, machine_type=ctx.worker, ) # Merge job_config and ctx.config job_config.update(ctx.config) if ctx.timeout is not None: job_config['results_timeout'] = ctx.timeout job = yaml.safe_dump(job_config) num = ctx.num while num > 0: jid = beanstalk.put( job, ttr=60 * 60 * 24, priority=ctx.priority, ) print 'Job scheduled with name {name} and ID {jid}'.format( name=ctx.name, jid=jid) job_config['job_id'] = str(jid) report.try_push_job_info(job_config, dict(status='queued')) num -= 1
UTF-8
Python
false
false
2,014
15,169,824,530,677
a441743bed4ebfab2f0ab359e704c16ce4a93a6d
eb285ef4e08f28ef58483f30bae99f88cf52acb4
/tests/test_ec2adapters.py
8e1c09577015384281c689cb61eedb7746f6e05a
[ "MIT" ]
permissive
renanivo/cloudsnap
https://github.com/renanivo/cloudsnap
e0e77f67c07428e7814689843e75e6b589c52f17
d28a0abd49abe80688c4a85d0a9ba0fb70618538
refs/heads/master
2020-05-19T07:20:47.591535
2013-01-03T16:42:24
2013-01-03T16:42:24
2,730,509
2
0
null
false
2014-08-29T19:16:19
2011-11-08T00:32:47
2014-06-07T02:21:51
2014-06-07T02:21:51
14,770
4
1
0
Python
null
null
import unittest import datetime from mock import MagicMock, Mock, patch from ec2adapters import EC2Account from settings import AWS class EC2AccountTest(unittest.TestCase): def _get_instance_mock(self, id, tags={}): instance_mock = MagicMock(spec='boto.ec2.instance.Instance') instance_mock.id = id instance_mock.tags = tags return instance_mock @patch('ec2adapters.EC2Connection') def test_should_create_a_connection_when_not_given(self, connection_mock): EC2Account() connection_mock.assert_called_once_with( AWS['key'], AWS['secret'], is_secure=AWS['use_safe_connection'], validate_certs=AWS['validate_certs']) @patch('ec2adapters.EC2Connection') def test_should_get_an_instance_list(self, connection_mock): reservation_mock1 = Mock(spec='boto.ec2.instance.Reservation') reservation_mock2 = Mock(spec='boto.ec2.instance.Reservation') instance_mock1 = Mock(spec='boto.ec2.instance.Instance') instance_mock2 = Mock(spec='boto.ec2.instance.Instance') reservation_mock1.instances = [instance_mock1] reservation_mock2.instances = [instance_mock2] connection_mock.get_all_instances.return_value = [reservation_mock1, reservation_mock2] a = EC2Account(connection_mock) instances = a.get_instances() self.assertEqual(1, connection_mock.get_all_instances.call_count) self.assertEqual(2, len(instances)) self.assertIn(instance_mock1, instances) self.assertIn(instance_mock2, instances) @patch('ec2adapters.AMI_NAME_TEMPLATE', '%(today)s-%(name)s') @patch('ec2adapters.EC2Connection') def test_should_backup_an_instance_and_get_the_AMI_id(self, connection_mock): connection_mock.create_image.return_value = 99 instance_mock = self._get_instance_mock(11, {"Name": "instance_name"}) account = EC2Account(connection_mock) self.assertEqual(99, account.backup_instance(instance_mock)) @patch('ec2adapters.AMI_NAME_TEMPLATE', '%(today)s-%(name)s') @patch('ec2adapters.EC2Connection') def test_should_use_boto_to_backup_an_instance(self, connection_mock): instance_mock = self._get_instance_mock(11, {"Name": 
"instance_name"}) account = EC2Account(connection_mock) account.backup_instance(instance_mock) connection_mock.create_image.assert_called_once_with(11, "%s-%s" % (datetime.date.today(), "instance_name")) @patch('ec2adapters.AMI_NAME_TEMPLATE', '%(today)s-%(name)s') @patch('ec2adapters.EC2Connection') def test_should_backup_an_instance_with_time_and_instance_id_on_tags(self, connection_mock): connection_mock.create_image.return_value = 99 instance_mock = self._get_instance_mock(11, {"Name": "instance_name"}) account = EC2Account(connection_mock) account.backup_instance(instance_mock) connection_mock.create_tags.assert_called_once_with([99], {"instance": 11, "created_at": datetime.date.today(), "created_by": "cloudsnap"}) @patch('boto.ec2.image.Image') @patch('boto.ec2.EC2Connection') def test_should_get_a_list_of_backups(self, connection_mock, image_mock): image_mock.tags = {"created_by": "cloudsnap"} connection_mock.get_all_images.return_value = [image_mock] account = EC2Account(connection_mock) backups = account.get_backups() self.assertEqual(1, len(backups)) self.assertIn(image_mock, backups) connection_mock.get_all_images.assert_called_once() @patch('boto.ec2.image.Image') @patch('boto.ec2.image.Image') @patch('boto.ec2.EC2Connection') def test_should_not_get_AMIs_not_created_by_it(self, connection_mock, image_mock1, image_mock2): account = EC2Account(connection_mock) account.get_backups() connection_mock.get_all_images.assert_called_once_with( filters={"tag:created_by": "cloudsnap"}) @patch('boto.ec2.image.Image') @patch('boto.ec2.EC2Connection') def test_should_delete_a_backup(self, connection_mock, image_mock): account = EC2Account(connection_mock) image_mock.id = 10 account.delete_backup(image_mock) connection_mock.deregister_image.assert_called_once_with(image_mock.id, True) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
2,013
19,507,741,463,836
ab6af2efa1104559024589c4bfb8d17a032642ae
df902ddba37714769ce30107bff8673cf8a781c0
/itoaster.py
049af2cf7982610bf249d43e87339884a551479f
[ "GPL-3.0-only" ]
non_permissive
anoochit/itoster
https://github.com/anoochit/itoster
0f7e1db2a26b6cb193048e2d1a75b6d00b192a10
7ca751b7cddfdb7e913df6d143ed0500d5d95898
refs/heads/master
2021-01-01T05:48:14.477569
2009-05-23T13:19:19
2009-05-23T13:19:19
32,371,140
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python import pygtk import gtk import gtk.glade import string import os import gobject import pickle try: import pygtk pygtk.require("2.0") except: pass try: import gtk import gtk.glade except: print("GTK Not Availible") sys.exit(1) class gui: iso_file = {} iso_title = {} def __init__(self): # FIXME: you can use sqlite to store cd data # Read a file file = open("distrolist.txt") self.index=1 for line in file.readlines(): # make dict for iso_file line_str=line.split('|') self.dictname=self.index; # FIXME: fix total button = 26 and label must not null if (self.index < 26) and (line_str[0]!="") : gui.iso_file[self.dictname]=line_str[1] gui.iso_title[self.dictname]=line_str[0] self.index=self.index+1 file.close() # show dict print gui.iso_file print gui.iso_title # Load GUI self.wTree=gtk.glade.XML('itoaster.glade') dic = { "on_cancel": (gtk.main_quit), "on_burn": self.on_burn } self.wTree.signal_autoconnect(dic) self.win = self.wTree.get_widget("window1") self.win.connect("delete_event", self.on_delete_event) self.win.maximize() # set button label for index,filename in gui.iso_file.iteritems(): self.button_name="button" + str(index) self.button = self.wTree.get_widget(self.button_name) self.button.set_label(gui.iso_title[index]) self.win.show() def on_burn(self, widget): self.name= widget.name self.file_index= int((self.name.split('button')[1])) # print iso filename print gui.iso_file[self.file_index] # FIXME: I didn't check for file exist self.isofile="nautilus-cd-burner --source-iso=iso/"+gui.iso_file[self.file_index] print self.isofile # run nautilus cd burner with selected iso image os.system(self.isofile) def on_delete_event(self, widget, event): self.win.set_sensitive(False) dialog = gtk.MessageDialog(self.win, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_YES_NO, None) dialog.set_markup('<big><b>Are you sure you want to quit?</b></big>') dialog.connect("destroy", lambda w: self.win.set_sensitive(True)) answer = 
dialog.run() if answer == -8: dialog.destroy() return False if answer == -9: dialog.destroy() return True app = gui() gtk.main()
UTF-8
Python
false
false
2,009
10,187,662,467,181
45bda1bc35fe45da9e9b246ac753ee4ea7cf9242
0f5f9dde324fc84d4be93d17745de72262ca5dad
/gpycomplete/helpers.py
d5c61fe470c92c9061915fb181d2d456e2a5247b
[ "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
fgallina/gpycomplete
https://github.com/fgallina/gpycomplete
f6d5a5c02c363baaa8897870a2bd495b4df6c218
6c7f600feb7d62369fbc71fc3cf1b24eb76afbd5
refs/heads/master
2021-01-19T13:50:29.737414
2010-04-28T12:03:46
2010-04-28T12:03:46
86,621
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# This file is part of gpycomplete. # gpycomplete is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # gpycomplete is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with gpycomplete. If not, see <http://www.gnu.org/licenses/>. # gpycomplete is written from scratch by Fabian Ezequiel Gallina # <fgallina at gnu dot org dot ar> but it is somehow based on the # original pycomplete package from the http://python-mode.sf.net. # gpycomplete allows inline completion and help for the python # programing language within GNU/Emacs import pydoc import types import inspect import context def get_signature(obj): """Returns the signature of the given object. 
Inspired in the original pycomplete package """ # FIXME - make this function less ugly paren = obj.find("(") if paren != -1: obj = obj[:paren] context_dict = 'subprogram_globals' if not obj in context.get_context(): context_dict = 'helper_globals' if not context.cimport(obj, context_dict): return "no signature for " + obj try: obj = context.eval_code(obj, context_dict) except: return "no signature for " + obj sig = "" # This part is extracted from the pycomplete.py file if type(obj) in (types.ClassType, types.TypeType): obj = _find_constructor(obj) elif type(obj) == types.MethodType: obj = obj.im_func if type(obj) in [types.FunctionType, types.LambdaType]: (args, varargs, varkw, defaults) = inspect.getargspec(obj) sig = ('%s: %s' % (obj.__name__, inspect.formatargspec(args, varargs, varkw, defaults))) doc = getattr(obj, '__doc__', '') if doc and not sig: doc = doc.lstrip() pos = doc.find('\n') if pos < 0 or pos > 70: pos = 70 sig = doc[:pos] return sig def _find_constructor(class_ob): # This part is extracted from the pycomplete.py file # Given a class object, return a function object used for the # constructor (ie, __init__() ) or None if we can't find one. try: return class_ob.__init__.im_func except AttributeError: for base in class_ob.__bases__: rc = _find_constructor(base) if rc is not None: return rc return None def get_help(obj): """Returns the help of the given object. Inspired in the original pycomplete package """ paren = obj.rfind("(") if paren != -1: obj = obj[:paren] if obj.endswith("(") or obj.endswith("."): obj = obj[:-1] found = False pobj = None context_dict = 'subprogram_globals' if not obj in context.get_context(): context_dict = 'helper_globals' found = context.cimport(obj, context_dict) else: pobj = context.eval_code(obj) if obj not in context.subcontext_globals and found: pobj = context.eval_code(obj, context_dict) if not pobj: return "no help string for " + obj obj = context.eval_code(obj) return pydoc.getdoc(obj)
UTF-8
Python
false
false
2,010
8,083,128,456,370
1645b4016efe960b49f242d5ad87e46a3c087fa4
503f5089422a97dc6f496cb7ecdaaf711611e5c0
/気
f97deee3533aeed5b37b6c14cb1aba9afbf9b125
[]
no_license
jd/ki
https://github.com/jd/ki
b3e782ed176ea38099aff8ba0aea4e1c06ba754b
343eeee119e2167a52e882d7772ecf3fe8f04d3a
refs/heads/main
2023-05-06T07:06:47.694980
2012-03-13T16:17:26
2012-03-13T16:17:26
363,116,328
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ki-client -- Distributed file system client # # Copyright © 2011 Julien Danjou <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys import os import dbus import ki.storage import argparse import tempfile import time def _edit_with_tempfile(s, suffix=""): tmpf = tempfile.mktemp() + suffix with file(tmpf, "w") as f: f.write(s) if os.system("%s %s" % (os.getenv("EDITOR"), tmpf)) == 0: with file(tmpf, "r") as f: s = f.read() else: s = None try: os.unlink(tmpf) except: pass return s def recordlist(box, **kwargs): box_path = storage.GetBox(box) for (sha, commit_time) in bus.get_object(ki.storage.BUS_INTERFACE, box_path).RecordList(): print "%s %s" % (sha, time.strftime("%a, %d %b %Y %H:%M:%S %z", time.localtime(commit_time))) def _config(obj, what): if what == 'set': obj.SetConfig(sys.stdin.read()) elif what == 'edit': obj.SetConfig(_edit_with_tempfile(obj.GetConfig(), ".js")) else: print obj.GetConfig() def config(what, **kwargs): _config(storage, what) def remote_add(name, url, weight, **kwargs): storage.AddRemote(name, url, weight) def remote_remove(name, **kwargs): storage.RemoveRemote(name) def remote_list(**kwargs): for item in storage.ListRemotes(): r = bus.get_object(ki.storage.BUS_INTERFACE, item) print "%s:" % r.GetName() print " ID: %s" % r.GetID() print " URL: %s" % r.GetURL() print " Weight: %d" % 
r.GetWeight() def _remote_name_to_obj(name): return bus.get_object(ki.storage.BUS_INTERFACE, "%s/remotes/%s" % (storage.__dbus_object_path__, name)) def remote_showrefs(name, **kwargs): r = _remote_name_to_obj(name) for ref, sha in r.GetRefs().iteritems(): print " %30s %s" % (ref[-30:], sha) def remote_config(what, name, **kwargs): _config(_remote_name_to_obj(name), what) def info(**kwargs): print (u"Path: %s" % storage.GetPath()).encode('utf-8') print (u"ID: %s" % storage.GetID()).encode('utf-8') def box_create(name, **kwargs): box_path = storage.CreateBox(name) def box_commit(name, **kwargs): box_path = storage.GetBox(name) bus.get_object(ki.storage.BUS_INTERFACE, box_path).Commit() def box_list(**kwargs): for box in storage.ListBoxes(): print box def box_mount(name, mountpoint, **kwargs): box_path = storage.GetBox(name) bus.get_object(ki.storage.BUS_INTERFACE, box_path).Mount(mountpoint) parser = argparse.ArgumentParser() parser.add_argument('--storage', type=str, help='Storage path.') subparsers = parser.add_subparsers(help='Action to perform.', title="Actions", description="Actions to perform on the given box.") # Info parser_info = subparsers.add_parser('info', help='Show storage information.') parser_info.set_defaults(action=info) # Config parser_config = subparsers.add_parser('config', help='Dump or set storage configuration.') parser_config.set_defaults(action=config) parser_config.add_argument('what', type=str, choices=['dump', 'set', 'edit'], help='The action to perform.') # Recordlist parser_recordlist = subparsers.add_parser('recordlist', help='Show the list of records of a box.') parser_recordlist.set_defaults(action=recordlist) parser_recordlist.add_argument('box', type=str, help='The box to show records list of.') # Box parser_box = subparsers.add_parser('box', help='Manage boxes.') subparsers_box = parser_box.add_subparsers(help='Action to perform on boxes.', title='Actions', description='Action to perform on a given box.') # box create 
parser_box_create = subparsers_box.add_parser('create', help='Create a box.') parser_box_create.set_defaults(action=box_create) parser_box_create.add_argument('name', type=str, help='The name of the box to create.') # box commit parser_box_commit = subparsers_box.add_parser('commit', help='Commit a box immediately.') parser_box_commit.set_defaults(action=box_commit) parser_box_commit.add_argument('name', type=str, help='The name of the box to commit.') # box list parser_box_list = subparsers_box.add_parser('list', help='List existing boxes.') parser_box_list.set_defaults(action=box_list) # box mount parser_box_mount = subparsers_box.add_parser('mount', help='Mount a box.') parser_box_mount.set_defaults(action=box_mount) parser_box_mount.add_argument('name', type=str, help='The name of the box to mount.') parser_box_mount.add_argument('mountpoint', type=str, help='The directory to mount the box into.') # Remotes parser_remote = subparsers.add_parser('remote', help='Act on remotes.') subparsers_remote = parser_remote.add_subparsers(help='Action to perform on remotes.', title='Actions', description='Action to perform on the given remote of a storage.') ## List parser_remote_list = subparsers_remote.add_parser('list', help='List remotes.') parser_remote_list.set_defaults(action=remote_list) ## Showrefs parser_remote_showrefs = subparsers_remote.add_parser('showrefs', help='List remote refs.') parser_remote_showrefs.add_argument('name', type=str, help='Remote name.') parser_remote_showrefs.set_defaults(action=remote_showrefs) ## Add parser_remote_add = subparsers_remote.add_parser('add', help='Add a remote.') parser_remote_add.set_defaults(action=remote_add) parser_remote_add.add_argument('name', type=str, help='Remote name.') parser_remote_add.add_argument('url', type=str, help='Remote URL.') parser_remote_add.add_argument('weight', type=int, default=100, nargs='?', help='Remote weight.') ## Remove parser_remote_remove = subparsers_remote.add_parser('remove', 
help='Remove a remote.') parser_remote_remove.set_defaults(action=remote_remove) parser_remote_remove.add_argument('name', type=str, help='Remote name.') ## Config parser_remote_config = subparsers_remote.add_parser('config', help='Remote config access.') parser_remote_config.set_defaults(action=remote_config) parser_remote_config.add_argument('what', type=str, choices=['dump', 'set', 'edit'], help='The action to perform.') parser_remote_config.add_argument('name', type=str, help='Remote name.') args = parser.parse_args() # Now connect. bus = dbus.SessionBus() storage_manager = bus.get_object(ki.storage.BUS_INTERFACE, "%s/StorageManager" % ki.storage.BUS_PATH) if args.storage is not None: storage_path = storage_manager.GetStorage(args.storage) else: storage_path = storage_manager.GetUserStorage() storage = bus.get_object(ki.storage.BUS_INTERFACE, storage_path) args.action(**args.__dict__)
UTF-8
Python
false
false
2,012
10,788,957,850,296
8ef14ace73c7eb222b501fcb68d0e9c8bc1ca6c8
67f748153a0d7dcb96802ef50bb2604dac323a24
/console/pygtk_console.py
091260705140a13eae53dfb64a9355dcd5fb0a09
[ "MIT", "LicenseRef-scancode-ian-piumarta" ]
non_permissive
antoinevg/xo-lambda
https://github.com/antoinevg/xo-lambda
c37f68161de1bcf06f0cd7d85407db3f37934d2f
0cc8bc218a2ffb8b9713a94ff215acb5c2ffd4bd
refs/heads/master
2016-08-06T23:00:57.577422
2011-05-18T01:21:07
2011-05-18T01:21:07
1,763,818
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python2.5 import sys sys.path.append('.') import pygtk pygtk.require('2.0') import gtk import gtk.gdk import cairo import ctypes class PycairoContext(ctypes.Structure): _fields_ = [("PyObject_HEAD", ctypes.c_byte * object.__basicsize__), ("ctx", ctypes.c_void_p), ("base", ctypes.c_void_p)] import console default_width = 492 default_height = 344 class GtkLambda: def __init__(self): self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) self.window.set_resizable(True) self.window.set_app_paintable(True) # self.window.set_icon_from_file("lambda-console.png") self.window.set_title("lambda-console") self.window.set_default_size(default_width, default_height) self.window.show_all() #context = gtk.gdk.cairo_create(self.window) context = self.window.window.cairo_create() cairo_t = PycairoContext.from_address(id(context)).ctx self.console = console.Console(cairo_t, 10.0) self.window.connect("delete_event", gtk.main_quit) self.window.connect("key-press-event", self.key_press_event) self.window.connect("expose-event", self.expose_event) self.window.show() def key_press_event(self, widget, event): if event.type is not gtk.gdk.KEY_PRESS: print "Not a gtk.gdk.KEY_PRESS" return keyval = event.keyval if keyval == gtk.keysyms.Escape: gtk.main_quit() elif keyval == gtk.keysyms.Return: keyval = ord('\n') elif keyval == gtk.keysyms.Shift_L: return False elif keyval == gtk.keysyms.Shift_R: return False elif keyval == gtk.keysyms.BackSpace: self.console.key_backspace() elif keyval == gtk.keysyms.Delete: self.console.key_delete() elif keyval == gtk.keysyms.Left: self.console.key_left() elif keyval == gtk.keysyms.Right: self.console.key_right() elif keyval == gtk.keysyms.Up: self.console.key_up() elif keyval == gtk.keysyms.Down: self.console.key_down() if keyval >= 0 and keyval <= 255: self.console.key_press(keyval) self.window.queue_draw() return False def expose_event(self, widget, event): self.console.width = widget.allocation.width self.console.height = widget.allocation.height 
context = widget.window.cairo_create() # TODO - set console width context.set_source_rgb(1, 1, 1) context.paint() # TODO - transparency & anti-alasing options context.save() cairo_t = PycairoContext.from_address(id(context)).ctx self.console.expose(cairo_t) context.restore() return False def main(self): gtk.main() def destroy(self, widget, data=None): gtk.main_quit() if __name__ == "__main__": gtk_lambda = GtkLambda() gtk_lambda.main()
UTF-8
Python
false
false
2,011
16,415,365,014,469
3ff9e5b94d7b07e5aa28b56f313bb9bd6b4f4c42
244692db4317f9358f898280779dc229e39d6849
/buggy/forms.py
7372b8579e700591d6814f0b3601d2b01f121db1
[]
no_license
batiste/django-buggy
https://github.com/batiste/django-buggy
c75bfb471da80789dcd60dc5bf1080ecf9cbf525
74e5f49f26e78e5a7da262db94dda3c1209cb04b
refs/heads/master
2021-01-01T18:29:09.466413
2013-08-29T09:15:19
2013-08-29T09:15:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.forms import ModelForm from buggy.models import Ticket, Comment class CreateTicketForm(ModelForm): class Meta: model = Ticket exclude = ["author", "project"] class EditTicketForm(ModelForm): class Meta: model = Ticket exclude = ["author", "project"] class EditTicketForm(ModelForm): class Meta: model = Ticket exclude = ["author", "project"] class CreateCommentForm(ModelForm): class Meta: model = Comment exclude = ["author", "ticket", "status_before"]
UTF-8
Python
false
false
2,013
11,587,821,783,398
e80d5ded96bd0a8bc845936c005369de2120f187
23e3c24c7045591c7c35f335fddbf937efceba5f
/priv/py/erlport/erlterms.py
69cc3cfb2a20a29dd20aad1fc2b28b97adb36c4d
[ "BSD-3-Clause" ]
permissive
gar1t/python_port
https://github.com/gar1t/python_port
b23b171126b1f678267b681d956bc4c8ba0c8b24
82a2b5df17d76e9ebbd65eaa71ee7459b9479fb6
refs/heads/master
2016-09-06T13:07:43.276656
2013-04-12T20:46:58
2013-04-12T20:46:58
9,401,671
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright (c) 2009, 2010, Dmitry Vasiliev <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Erlang external term format. 
See Erlang External Term Format for details: http://www.erlang.org/doc/apps/erts/erl_ext_dist.html """ __author__ = "Dmitry Vasiliev <[email protected]>" from struct import pack, unpack from array import array from zlib import decompressobj, compress from datetime import datetime class IncompleteData(ValueError): """Need more data.""" class Atom(str): """Erlang atom.""" def __new__(cls, s): if len(s) > 255: raise ValueError("invalid atom length") return super(Atom, cls).__new__(cls, s) def __repr__(self): return "atom(%s)" % self class String(unicode): """Erlang list/string wrapper.""" def __new__(cls, s): if isinstance(s, list): # Raise TypeError s = u"".join(unichr(i) for i in s) elif not isinstance(s, unicode): raise TypeError("list or unicode object expected") return super(String, cls).__new__(cls, s) def __repr__(self): return "string(%s)" % super(String, self).__repr__() class BitBinary(str): """Erlang bitstring whose length in bits is not a multiple of 8.""" def __new__(cls, s, bits): obj = super(BitBinary, cls).__new__(cls, s) obj.bits = bits return obj def __repr__(self): return "bits(%s, %s)" % (self.bits, super(BitBinary, self).__repr__()) def decode(string): """Decode Erlang external term.""" if len(string) < 1: raise IncompleteData("incomplete data: %r" % string) version = ord(string[0]) if version != 131: raise ValueError("unknown protocol version: %i" % version) if string[1:2] == '\x50': # compressed term if len(string) < 6: raise IncompleteData("incomplete data: %r" % string) d = decompressobj() zlib_data = string[6:] term_string = d.decompress(zlib_data) + d.flush() uncompressed_size = unpack('>I', string[2:6])[0] if len(term_string) != uncompressed_size: raise ValueError( "invalid compressed tag, " "%d bytes but got %d" % (uncompressed_size, len(term_string))) # tail data returned by decode_term() can be simple ignored return decode_term(term_string)[0], d.unused_data return decode_term(string[1:]) def decode_term(string, # Hack to turn globals 
into locals len=len, ord=ord, unpack=unpack, tuple=tuple, float=float, BitBinary=BitBinary, Atom=Atom): if len(string) < 1: raise IncompleteData("incomplete data: %r" % string) tag = ord(string[0]) tail = string[1:] if tag == 97: # SMALL_INTEGER_EXT if not tail: raise IncompleteData("incomplete data: %r" % string) return ord(tail[:1]), tail[1:] elif tag == 98: # INTEGER_EXT if len(tail) < 4: raise IncompleteData("incomplete data: %r" % string) i, = unpack(">i", tail[:4]) return i, tail[4:] elif tag == 106: # NIL_EXT return [], tail elif tag == 107: # STRING_EXT if len(tail) < 2: raise IncompleteData("incomplete data: %r" % string) length, = unpack(">H", tail[:2]) tail = tail[2:] if len(tail) < length: raise IncompleteData("incomplete data: %r" % string) return [ord(i) for i in tail[:length]], tail[length:] elif tag == 108: # LIST_EXT if len(tail) < 4: raise IncompleteData("incomplete data: %r" % string) length, = unpack(">I", tail[:4]) tail = tail[4:] lst = [] while length > 0: term, tail = decode_term(tail) lst.append(term) length -= 1 ignored, tail = decode_term(tail) return lst, tail elif tag == 109: # BINARY_EXT if len(tail) < 4: raise IncompleteData("incomplete data: %r" % string) length, = unpack(">I", tail[:4]) tail = tail[4:] if len(tail) < length: raise IncompleteData("incomplete data: %r" % string) return tail[:length], tail[length:] elif tag == 100: # ATOM_EXT if len(tail) < 2: raise IncompleteData("incomplete data: %r" % string) length, = unpack(">H", tail[:2]) tail = tail[2:] if len(tail) < length: raise IncompleteData("incomplete data: %r" % string) name = tail[:length] tail = tail[length:] if name == "true": return True, tail elif name == "false": return False, tail elif name == "none": return None, tail return Atom(name), tail elif tag == 104 or tag == 105: # SMALL_TUPLE_EXT, LARGE_TUPLE_EXT if tag == 104: if not tail: raise IncompleteData("incomplete data: %r" % string) arity = ord(tail[0]) tail = tail[1:] else: if len(tail) < 4: raise 
IncompleteData("incomplete data: %r" % string) arity, = unpack(">I", tail[:4]) tail = tail[4:] lst = [] while arity > 0: term, tail = decode_term(tail) lst.append(term) arity -= 1 return tuple(lst), tail elif tag == 70: # NEW_FLOAT_EXT term, = unpack(">d", tail[:8]) return term, tail[8:] elif tag == 99: # FLOAT_EXT return float(tail[:31].split("\x00", 1)[0]), tail[31:] elif tag == 110 or tag == 111: # SMALL_BIG_EXT, LARGE_BIG_EXT if tag == 110: if len(tail) < 2: raise IncompleteData("incomplete data: %r" % string) length, sign = unpack(">BB", tail[:2]) tail = tail[2:] else: if len(tail) < 5: raise IncompleteData("incomplete data: %r" % string) length, sign = unpack(">IB", tail[:5]) tail = tail[5:] if len(tail) < length: raise IncompleteData("incomplete data: %r" % string) n = 0 for i in array('B', tail[length-1::-1]): n = (n << 8) | i if sign: n = -n return n, tail[length:] elif tag == 77: # BIT_BINARY_EXT if len(tail) < 5: raise IncompleteData("incomplete data: %r" % string) length, bits = unpack(">IB", tail[:5]) tail = tail[5:] if len(tail) < length: raise IncompleteData("incomplete daata: %r" % string) return BitBinary(tail[:length], bits), tail[length:] raise ValueError("unsupported data tag: %i" % tag) def encode(term, compressed=False): """Encode Erlang external term.""" encoded_term = encode_term(term) # False and 0 do not attempt compression. 
if compressed: if compressed is True: # default compression level of 6 compressed = 6 zlib_term = compress(encoded_term, compressed) if len(zlib_term) + 5 <= len(encoded_term): # compressed term is smaller return '\x83\x50' + pack('>I', len(encoded_term)) + zlib_term return "\x83" + encoded_term def encode_term(term, # Hack to turn globals into locals pack=pack, tuple=tuple, len=len, isinstance=isinstance, list=list, int=int, long=long, array=array, unicode=unicode, Atom=Atom, BitBinary=BitBinary, str=str, float=float, ord=ord, dict=dict, datetime=datetime, True=True, False=False, ValueError=ValueError, OverflowError=OverflowError): if isinstance(term, tuple): arity = len(term) if arity <= 255: header = 'h%c' % arity elif arity <= 4294967295: header = pack(">BI", 105, arity) else: raise ValueError("invalid tuple arity") _encode_term = encode_term return header + "".join(_encode_term(t) for t in term) if isinstance(term, list): if not term: return "j" length = len(term) if length <= 65535: try: # array coersion will allow floats as a deprecated feature for t in term: if not isinstance(t, (int, long)): raise TypeError bytes = array('B', term).tostring() except (TypeError, OverflowError): pass else: if len(bytes) == length: return pack(">BH", 107, length) + bytes elif length > 4294967295: raise ValueError("invalid list length") header = pack(">BI", 108, length) _encode_term = encode_term return header + "".join(_encode_term(t) for t in term) + "j" elif isinstance(term, unicode): if not term: return "j" length = len(term) if length <= 65535: try: bytes = term.encode("latin1") except UnicodeEncodeError: pass else: return pack(">BH", 107, length) + bytes return encode_term([ord(i) for i in term]) elif isinstance(term, Atom): return pack(">BH", 100, len(term)) + term elif isinstance(term, BitBinary): # Must be before str type return pack(">BIB", 77, len(term), term.bits) + term elif isinstance(term, str): length = len(term) if length > 4294967295: raise 
ValueError("invalid binary length") return pack(">BI", 109, length) + term # must be before int type elif term is True or term is False: term = term and 'true' or 'false' return pack(">BH", 100, len(term)) + term elif isinstance(term, (int, long)): if 0 <= term <= 255: return 'a%c' % term elif -2147483648 <= term <= 2147483647: return pack(">Bi", 98, term) if term >= 0: sign = 0 else: sign = 1 term = -term bytes = array('B') while term > 0: bytes.append(term & 0xff) term >>= 8 length = len(bytes) if length <= 255: return pack(">BBB", 110, length, sign) + bytes.tostring() elif length <= 4294967295: return pack(">BIB", 111, length, sign) + bytes.tostring() raise ValueError("invalid integer value") elif isinstance(term, float): return pack(">Bd", 70, term) elif isinstance(term, dict): # encode dict as proplist, but will be orddict compatible if keys # are all of the same type. return encode_term(sorted(term.iteritems())) elif term is None: return pack(">BH", 100, 4) + "none" elif isinstance(term, datetime): return encode_term(((term.year, term.month, term.day), (term.hour, term.minute, term.second))) raise ValueError("unsupported data type: %s" % type(term))
UTF-8
Python
false
false
2,013
13,048,110,676,113
9eb52e853cf44b892004a1f24c83a33c435c2ffe
490ba6d14fb7662dcde4cdf15685691aae9fdb0b
/im/view_place.py
0fc41b01113f598a75212f54959507fffcc46d47
[]
no_license
xiasheng/im
https://github.com/xiasheng/im
982ff3b551f6508752620f493fd0bb8e52f1faf0
c45e730699f74dc93d47c7e6b7db69f6b0cb7d58
refs/heads/master
2021-01-21T11:46:19.693687
2014-03-04T09:02:24
2014-03-04T09:02:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Django views for location-based ("nearby") queries.  Python 2 / old
# Django: uses the deprecated request.REQUEST combined GET/POST dict.
# Every view returns a JSON-style dict via MyHttpResponse with the
# project's retcode convention: 0 success, -1 generic failure,
# -2 unauthorized.
from django.http import HttpResponse
from django.shortcuts import render_to_response
from myapp.models import user_base, status, file_status
from view_auth import GetAuthUserId, AuthException
from view_common import MyHttpResponse
from view_file import SaveFile
import lbs


def GetNearByUserList(request):
    # Return users near (lat, lng) within `range` meters, paginated by
    # `pagenum`; the requesting user is filtered out of the results.
    ret = {'retcode': 0, 'info': 'success', 'size': 0, 'hasmore': 1,
           'users': [], }
    try:
        _uid = GetAuthUserId(request)
        _lat = float(request.REQUEST.get('lat'))
        _lng = float(request.REQUEST.get('lng'))
        _dis = int(request.REQUEST.get('range', 10000))
        _page_num = int(request.REQUEST.get('pagenum', 0))
        # lbs backend returns {'total', 'size', 'content': [(id, distance), ...]}
        # (inferred from usage here — confirm against lbs.QueryUser)
        res = lbs.QueryUser(_lat, _lng, _dis, _page_num)
        _total = res['total']
        _size = res['size']
        if _total <= 10 or _size == 0:
            ret['hasmore'] = 0
        #ret['total'] = _total
        for s in res['content']:
            if _uid == s[0]:
                # don't list the caller himself
                _size -= 1
                continue
            _user = user_base.objects.get(id=s[0])
            ret['users'].append({'user': _user.toJSON(), 'distance': s[1]})
        ret['size'] = _size
    except AuthException:
        ret['retcode'] = -2
        ret['info'] = 'unauthorized'
    except AssertionError:
        # NOTE(review): only AssertionError is caught here, unlike the
        # bare `except` used by the sibling views — a float()/int()
        # ValueError would propagate as a 500.  Confirm intent.
        ret['retcode'] = -1
        ret['info'] = 'GetNearByUserList failed'
    return MyHttpResponse(ret)


def GetNearByStatusList(request):
    # Return nearby status ids + distances only (no detail expansion).
    ret = {'retcode': 0, 'info': 'success', 'size': 0, 'hasmore': 1,
           'statuses': [], }
    try:
        _uid = GetAuthUserId(request)
        _lat = float(request.REQUEST.get('lat'))
        _lng = float(request.REQUEST.get('lng'))
        _dis = int(request.REQUEST.get('range', 10000))
        _ts = int(request.REQUEST.get('timespan', 7200))
        _page_num = int(request.REQUEST.get('pagenum', 0))
        res = lbs.QueryStatus(_lat, _lng, _dis, _ts, _page_num)
        _total = res['total']
        _size = res['size']
        if _total <= 10 or _size == 0:
            ret['hasmore'] = 0
        #ret['total'] = _total
        for s in res['content']:
            ret['statuses'].append({'sid': s[0], 'distance': s[1]})
        ret['size'] = _size
    except AuthException:
        ret['retcode'] = -2
        ret['info'] = 'unauthorized'
    except:
        # best-effort: any other error is reported as a generic failure
        ret['retcode'] = -1
        ret['info'] = 'GetNearByStatusList failed'
    return MyHttpResponse(ret)


def GetNearByStatusDetail(request):
    # Like GetNearByStatusList but expands each status id into its full
    # JSON record, excluding the caller's own statuses.
    ret = {'retcode': 0, 'info': 'success', 'total': 0, 'size': 0,
           'hasmore': 1, }
    try:
        _uid = GetAuthUserId(request)
        _lat = float(request.REQUEST.get('lat'))
        _lng = float(request.REQUEST.get('lng'))
        _dis = int(request.REQUEST.get('range', 10000))
        _ts = int(request.REQUEST.get('timespan', 7200))
        _page_num = int(request.REQUEST.get('pagenum', 0))
        res = lbs.QueryStatus(_lat, _lng, _dis, _ts, _page_num)
        _total = res['total']
        _size = res['size']
        if _total <= 10 or _size == 0:
            ret['hasmore'] = 0
        sids = []
        distances = []
        for s in res['content']:
            sids.append(s[0])
            distances.append(s[1])
        # NOTE(review): filter(pk__in=sids) does not guarantee the same
        # order as sids, and `i` is not incremented for skipped rows, so
        # distances[i] can be paired with the wrong status.  Flagging
        # rather than fixing since the fix depends on backend ordering.
        _statuses = status.objects.filter(pk__in=sids)
        _list_statuses = []
        i = 0
        for s in _statuses:
            if _uid == s.user.id:
                _size -= 1
                continue
            _s = s.toJSON()
            _s['distance'] = distances[i]
            _list_statuses.append(_s)
            i += 1
        ret['statuses'] = _list_statuses
        #ret['total'] = _total
        ret['size'] = _size
    except AuthException:
        ret['retcode'] = -2
        ret['info'] = 'unauthorized'
    except:
        ret['retcode'] = -1
        ret['info'] = 'GetNearByStatusDetail failed'
    return MyHttpResponse(ret)


def GetNearByPhotoList_LBS(request):
    # Photo variant of GetNearByStatusDetail: queries type-2 (photo)
    # statuses from the lbs backend, then loads their file records.
    # NOTE(review): this view reads 'page_num' while its siblings read
    # 'pagenum' — confirm which the clients send.
    ret = {'retcode': 0, 'info': 'success', 'size': 0, 'hasmore': 1, }
    try:
        _uid = GetAuthUserId(request)
        _lat = float(request.REQUEST.get('lat'))
        _lng = float(request.REQUEST.get('lng'))
        _dis = int(request.REQUEST.get('range', 10000))
        _ts = int(request.REQUEST.get('timespan', 7200))
        _page_num = int(request.REQUEST.get('page_num', 0))
        _type = 2
        res = lbs.QueryStatus(_lat, _lng, _dis, _ts, _page_num, _type)
        _total = res['total']
        _size = res['size']
        if _total <= 10 or _size == 0:
            ret['hasmore'] = 0
        sids = []
        distances = []
        for s in res['content']:
            sids.append(s[0])
            distances.append(s[1])
        # NOTE(review): same ordering caveat as GetNearByStatusDetail —
        # distances[i] assumes the queryset preserves sids order.
        _photos = file_status.objects.filter(status__in=sids)
        _list_photos = []
        i = 0
        for s in _photos:
            _s = s.toJSON()
            _s['distance'] = distances[i]
            _list_photos.append(_s)
            i += 1
        ret['photos'] = _list_photos
        #ret['total'] = _total
        ret['size'] = _size
    except AuthException:
        ret['retcode'] = -2
        ret['info'] = 'unauthorized'
    except:
        ret['retcode'] = -1
        ret['info'] = 'GetNearByPhotoList failed'
    return MyHttpResponse(ret)


def GetNearByPhotoList(request):
    # Non-LBS photo feed: newest type-2 statuses from everyone except the
    # caller, paginated 10 per page straight from the ORM.
    ret = {'retcode': 0, 'info': 'success', 'size': 0, 'hasmore': 1, }
    try:
        _uid = GetAuthUserId(request)
        # unsaved model instance used purely as an exclusion filter key
        _user = user_base(id=_uid)
        _page_num = int(request.REQUEST.get('pagenum', 0))
        _statuses = status.objects.filter(type=2).exclude(user=_user).order_by('-id')[_page_num * 10:(_page_num + 1) * 10]
        _photos = []
        for s in _statuses:
            _photos.append(s.toJSON())
        ret['photos'] = _photos
        ret['size'] = len(_photos)
        ret['pagenum'] = _page_num
        # a full page implies there may be more results
        if 10 == len(_photos):
            ret['hasmore'] = 1
        else:
            ret['hasmore'] = 0
    except AuthException:
        ret['retcode'] = -2
        ret['info'] = 'unauthorized'
    except:
        ret['retcode'] = -1
        ret['info'] = 'GetNearByPhotoList failed'
    return MyHttpResponse(ret)
UTF-8
Python
false
false
2,014
1,992,864,827,905
940f52e3a0132732d509c4b11c56c48b4e71be7c
bc0aca7eb60a567afeeeea2dcd56dd426be1c7d9
/solar-tracker.py
471351c02bf6833d03c37a7fa0d0756b124e6e22
[]
no_license
acharlton/tracker
https://github.com/acharlton/tracker
5c4d0ca8ad27d88cd8f2b4a81d89b41f504d6cd3
5fbb6396c05f2edfef292a8ce1608aed497c16b9
refs/heads/master
2021-01-16T18:03:04.491403
2014-02-09T06:54:03
2014-02-09T06:54:03
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# script to log cpu_temp, RAM info, Disk info to a rrd database
# NOTE(review): despite the header comment above, this script is a solar
# tracker: it reads two light sensors through an SPI ADC and pans a servo
# (via an Adafruit PWM board) toward the brighter side.  Python 2,
# Raspberry Pi hardware required.  The whole program runs inside one
# try-block so Ctrl-C lands in the KeyboardInterrupt handler at the end,
# which releases the GPIO pins.
try:
    from Adafruit_PWM_Servo_Driver import PWM
    import os
    import re
    import time
    import spidev
    import RPi.GPIO as GPIO
    # GPIO references actual pin numbers (top left is 3.3v pin 2 is 5v)
    GPIO.setmode(GPIO.BOARD)
    # pin 12 is the base of the transistor->relay
    GPIO.setup(12, GPIO.OUT)
    GPIO.output(12, 0)  # pin, low or high
    GPIO.output(12, 1)  # pin, low or high
    # pin 15 is the base of the transistor->FET
    GPIO.setup(15, GPIO.OUT)
    GPIO.output(15, 0)
    # pin 16 is the first LDR sensor
    GPIO.setup(16, GPIO.OUT)
    GPIO.output(16, 0)
    GPIO.output(16, 1)
    # pin 18 is the second LDR sensor
    GPIO.setup(18, GPIO.OUT)
    GPIO.output(18, 0)
    GPIO.output(18, 1)
    # PWM controller at I2C address 0x40, 60 Hz servo refresh
    pwm = PWM(0x40, debug=True)
    pwm.setPWMFreq(60)
    # ADC on SPI bus 0, chip-select 0
    spi = spidev.SpiDev()
    spi.open(0, 0)
    move = True
    difference = 0
    # minimum left/right voltage difference before the servo moves
    tolerance = 0.15
    print "Tolerance: " + str(tolerance)
    #saveenergy = 3600
    snooze = 10
    print "Snooze Period: " + str(snooze)
    # need to read last position
    pos = 500
    # servo pulse limits (PWM ticks); note these shadow the builtins
    # max()/min(), which this script never calls
    max = 540
    min = 100
    # initialize servo
    print "panning servo - max left: " + str(max)
    pwm.setPWM(0, 0, min)
    time.sleep(1)
    print "panning servo - max right: " + str(min)
    pwm.setPWM(0, 0, max)
    time.sleep(2)
    print "starting...." + str(pos)

    def pause(t):
        # log the wall-clock time, then sleep t seconds
        localtime = time.asctime(time.localtime(time.time()))
        print "Local current time:\t", localtime
        print "sleeping.." + str(t) + "\n\n"
        time.sleep(t)

    def readadc(adcnum):
        # Read one channel (0-7) of the MCP3008-style SPI ADC; returns
        # the 10-bit sample, or -1 for an out-of-range channel.
        if adcnum > 7 or adcnum < 0:
            return -1
        r = spi.xfer2([1, 8 + adcnum << 4, 0])
        adcout = ((r[1] & 3) << 8) + r[2]
        return adcout

    while move:
        GPIO.output(12, 1)  # transistor on to saturate 5v relay for servo power enable
        GPIO.output(15, 1)  # transistor on to FET
        move = True
        left = readadc(0)
        right = readadc(1)
        batt = readadc(3)
        # scale raw 10-bit readings to volts; the per-channel multipliers
        # presumably match each divider network — confirm against hardware
        battvolts = (batt * 10.47) / 1024
        lvolts = (left * 2.8) / 1024
        rvolts = (right * 3.3) / 1024
        print ("\n\nCurrent Pos: \t\t" + str(pos) + " \nBattery: \t\t%5.3fv \nSensors: \t\t[left] %5.3fv \n\t\t\t[right] %5.3fv" % (battvolts, lvolts, rvolts))
        difference = lvolts - rvolts
        # move only when the imbalance exceeds the dead-band
        if((-1 * tolerance > difference) or (difference > tolerance)):
            if(left > right):
                # NOTE(review): position is clamped AFTER setPWM, so one
                # out-of-range pulse can be sent before the clamp applies.
                pos = pos - 20
                print "moving < " + str(pos)
                pwm.setPWM(0, 0, pos)
                if(pos < min):
                    pos = min
                pause(snooze)
            elif(left < right):
                pos = pos + 20
                print "moving > " + str(pos)
                pwm.setPWM(0, 0, pos)
                if(pos > max):
                    pos = max
                pause(snooze)
        else:
            # balanced: cut servo power to save energy, then wait
            GPIO.output(12, 0)
            GPIO.output(15, 0)
            pause(snooze)
        time.sleep(0.1)
except KeyboardInterrupt:
    # release all GPIO pins on Ctrl-C
    GPIO.cleanup()
UTF-8
Python
false
false
2,014
2,362,232,059,278
99f6088f57eecb9997545b57f003ea504e639c8e
c0010b3adb6d59b9206e29c9a9438e854add3b05
/topo/tests/testaudio.py
c5a2976b9d4ef6ae81cd23fe599c139a578e7311
[ "BSD-3-Clause", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
ioam/svn-history
https://github.com/ioam/svn-history
266d80f4509e5af679af2f18c7ce08852e17a831
42352f1ca541e6f3e52324536e40ce5df1aa7a61
refs/heads/master
2016-09-06T19:43:42.957796
2013-05-01T16:30:15
2013-05-01T16:30:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" $Id$ """ __version__='$Revision$' # CEBALERT: incomplete! import unittest from numpy.testing import assert_array_almost_equal from topo import pattern import topo.pattern.audio class TestAudio(unittest.TestCase): def setUp(self): self.audio = pattern.audio.AudioFile(filename="sounds/complex/daisy.wav") def test_basic(self): result = self.audio() suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestAudio))
UTF-8
Python
false
false
2,013
12,275,016,534,581
9240dae121f1252a7ce4bd2d129c70b4686cfeb4
ce5fbc919b14446156e7074ec8f8ce36452f9838
/ckanext/dgvat_por/stream_filters.py
4c382914c072560c7f1e1bee3dcd09df72cfa2b0
[]
no_license
datagvat/ckanext-dgvat_por
https://github.com/datagvat/ckanext-dgvat_por
12aa7831cc20441f7ef1d3068e899801bcdae33d
c9299ea70f7a617c92b44c14133e250ce547c456
refs/heads/master
2021-01-21T22:29:09.761830
2014-01-07T11:44:23
2014-01-07T11:44:23
15,390,922
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import re from genshi.filters.transform import Transformer from genshi.input import HTML import ckanext.dgvat_por.forms.html as html def package_id_filter(stream, pkg): data = {'id': pkg.id} html_code = html.DATASET_ID_CODE stream = stream | Transformer('body//ul[@class="property-list"]')\ .append(HTML(html_code % data)) return stream
UTF-8
Python
false
false
2,014
10,144,712,762,233
a009de1a41385d34b96574e14a4ae78d254b9596
94a98f03f28ba39e8374b1e283545df58f53d125
/education/contrib/user/management/commands/importusers.py
f0a119b70739b9c2dd41aa143098e19d3a8585c3
[]
no_license
khalib/cs-education
https://github.com/khalib/cs-education
18416bb29b1622f8fb2ed2c2a180abe7d1526197
83e7633697db53d11aea193e1a6059c632f49be7
refs/heads/master
2021-01-23T11:49:07.817905
2014-09-20T09:36:26
2014-09-20T09:36:26
20,694,364
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import urllib, urllib2, json from pymongo import MongoClient from datetime import datetime, timedelta from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.core.files.base import ContentFile from education.core.utils import * from education.contrib.file.models import Image from education.contrib.user.models import * class Command(BaseCommand): args = '<count> <offset>' help = 'Import user data.' def handle(self, *args, **options): GLog('Importing user data.', 'header') # Set parameters. count = 99999999 if len(args) >= 2: count = int(args[1]) skip = 0 if len(args) == 3: skip = int(args[2]) i = skip + 1 # Setup MongoDB connection. client = MongoClient(settings.EDUCATION_MONGODB_HOST, settings.EDUCATION_MONGODB_PORT) db = client[settings.EDUCATION_MONGODB_DATABASE] # Get and index review data. collection = db['users'] users = collection.find({}) for data in users: try: user = User.objects.get(uid=int(data['uid'])) except User.DoesNotExist: user = User() created = datetime.fromtimestamp(int(data['created'])) login = datetime.fromtimestamp(int(data['login'])) action = 'SKIPPED' if user.login is None: action = 'CREATED' elif user.login.replace(tzinfo=None) < login: action = 'UPDATED' if user.login is None or user.login.replace(tzinfo=None) < login: # Save the user data. user = User() user.uid = data['uid'] user.email = data['mail'].encode('utf8') user.username = data['name'].encode('utf8') user.status = int(data['status']) user.created = created user.login = login user.save() GLog('[%s] %s. %s - %s %s' % (action, i, user.uid, user.username, data['mail'])) i += 1
UTF-8
Python
false
false
2,014
17,806,934,429,089
2e71bccee4ef5dc9a283d2bde24516156d6181dc
43ea0c40e56617796654b9f3157febca8b84f8b3
/pytask/settings.py
61b5cf7835a6add3150330848e9a4da4e9be5887
[ "AGPL-3.0-only", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
madhusudancs/pytask
https://github.com/madhusudancs/pytask
2d1d80bcac77ba860490b6220f3a9c4a9648c391
03ba2d5fe196924a1b4fc5128c2062fccbd9fec9
refs/heads/master
2021-01-01T15:50:22.614610
2011-07-18T18:35:33
2011-07-18T18:35:33
1,334,050
3
2
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyTask.  If not, see <http://www.gnu.org/licenses/>.

__authors__ = [
    '"Madhusudan.C.S" <[email protected]>',
    '"Nishanth Amuluru" <[email protected]>',
    ]

# Django settings for pytask project.

import os

# machine-specific settings (DEBUG, DATABASES, ...) live in local.py
from pytask.local import *

ADMINS = (
    ('Madhusudan C.S.', '[email protected]'),
)

MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Kolkata'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/pytask/media/'

# Absolute path to the directory that holds static files.
# Example: "/home/static-files/static-files.lawrence.com/"
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')

# URL that handles the static files served from STATIC_ROOT. Make sure to use
# a trailing slash if there is a path component (optional in other cases).
# Examples: "http://static-files.lawrence.com",
# "http://example.com/static-files/"
STATIC_URL = '/pytask/static/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/pytask/admin_media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '^ww=xk&idt)=03kqg*fz8x%=dqbhh1kd2z=f%$m@r9_+9b=&x='

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.contrib.messages.context_processors.messages',
    'pytask.taskapp.context_processors.configuration',
)

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'pytask.middleware.exceptions.ExceptionMiddleware',
)

ROOT_URLCONF = 'pytask.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.dirname(__file__), 'templates'),
)

INSTALLED_APPS = (
    'django_extensions',
    'registration',
    'tagging',
    'south',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'pytask',
    'pytask.profile',
    'pytask.taskapp',
)

AUTH_PROFILE_MODULE = 'profile.Profile'

#django-registration
ACCOUNT_ACTIVATION_DAYS = 7

DEFAULT_FROM_EMAIL = 'FOSSEE Admin <[email protected]>'
UTF-8
Python
false
false
2,011
2,233,383,036,866
b79bade3fad05c065f5ebcfef997fbd9e2c0f477
ec6f01373a55584925d43754337f85dda54e83a3
/tests.py
2375ab539424c76c50fb50ea64963b87a47d48cc
[]
no_license
amontalenti/pistolwip
https://github.com/amontalenti/pistolwip
253eca0f91a0c050239bde74a6b6a48df0e02144
260d2fe2a7753379eab0b207acbe118d3ef5d51f
refs/heads/master
2021-01-01T18:49:08.654576
2012-04-06T17:57:19
2012-04-06T17:57:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from wip import _get_pivotal_tracker, stories_by_user, current_stories from nose.tools import assert_equal, assert_not_equal, assert_true, \ assert_false, assert_raises, assert_in, assert_is, assert_is_not, \ assert_is_not_none, assert_sequence_equal, assert_set_equal, \ assert_list_equal, assert_dict_equal, assert_items_equal from pprint import pprint def test_connection(): tracker = _get_pivotal_tracker() assert_is_not_none(tracker) i = 0 try: for project in tracker.projects.all(): i += 1 pass except: assert_false(True, "no exceptions should occur while iterating projects in connection") assert_true(i > 0, "more than one project should exist") def test_stories_by_user(): stories = stories_by_user() assert_is_not_none(stories) assert_true(len(stories) > 0, "at least one story should exist in story map") #pprint(stories) def test_current_stories(): stories = current_stories() #pprint(stories)
UTF-8
Python
false
false
2,012
10,273,561,813,133
50e0d8115e2527a473448f9395a19f3c3f8f4835
77235fcfc0724ab281975c43f1977190095c6f06
/p_Bootstrap.py
feb419f935edeb40085bf1f8d9efc58356ddfb28
[ "GPL-2.0-only", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later" ]
non_permissive
pforquesato/p_Metrics
https://github.com/pforquesato/p_Metrics
9d9783150a125f38f4345733125a0ad15510f38d
b896ca19972933a9d23a4dcfba16cdf6421f7630
refs/heads/master
2016-09-03T03:28:26.195585
2014-12-16T17:25:36
2014-12-16T17:25:36
22,621,236
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
from scipy.stats import t
from random import randint
import scipy.sparse as sparse


def p_Bootstrap(W, repNumber=200, statistic='se', bCluster=None,
                method='nonparametric'):
    '''
    Implements Bootstrap method on data, obtaining the target statistic.

    __author__ = 'Pedro Forquesato <[email protected]>'

    Arguments
    ---------
    W : np.array
        A two-dimensional array with the data to be sampled: column 0 is
        the dependent variable y, the remaining columns the regressors X.
    repNumber : int
        The number of replications of data sampling performed by the
        bootstrapping procedure. Defaults to 200, following Efron and
        Tibsharani (1993), p. 52.
    statistic : str
        A string with the statistic to be bootstrapped. Defaults to
        Std. Errors 'se', and so far only implemented statistic.
    bCluster : pandas.DataFrame or None
        Cluster-membership dummy columns (one column per cluster) for
        cluster-robust bootstrapped standard errors, or None.
    method : str
        One of possible methods of performing bootstrap. Defaults to
        'nonparametric', other possibilities are 'parametric' and
        'residual'. For more information, see Cameron & Triverdi (2005),
        p.360-361.

    Returns
    -------
    np.array
        The K x K bootstrap variance-covariance matrix of the OLS
        coefficient estimator.

    Raises
    ------
    Exception
        If bCluster defines only a single cluster.
    ValueError
        If a bootstrap sample yields a singular X'X matrix.
    NotImplementedError
        For any (statistic, method) pair other than ('se',
        'nonparametric').
    '''
    # List where the bootstrapped repetitions will be sent
    bootList = list()

    # Sets up parameters: N observations, K regressors
    N = len(W[:, 0])
    K = len(W[0, 1:])

    if bCluster is not None:
        # Transform factor into dummies
        numberClusters = len(bCluster.columns)
        # Check if cluster has one class
        if numberClusters == 1:
            raise Exception('This factor has only one unique value!')
        # BUG FIX: use floor division — under true division (Python 3 /
        # `from __future__ import division`) N / numberClusters is a
        # float and cannot size the sampling loop below.
        nSampling = N // numberClusters

    for rep in range(repNumber):
        # See Cameron & Triverdi (2005), p.360 for the bootstrap algorithm
        # BUG FIX: the original tested `statistic is 'se'` etc. — `is`
        # compares object identity and is only accidentally true for
        # interned string literals; callers passing a computed or
        # non-interned string would silently hit NotImplementedError.
        if statistic == 'se' and method == 'nonparametric':
            if bCluster is None:
                # Gets indexes of a sample with replacement
                sampleIndx = [randint(0, N - 1) for i in range(N)]
                # Get the sample database
                smple = W[sampleIndx, :]
                # Define X and y for the sample
                X_b = smple[:, 1:]
                y_b = smple[:, 0]
                # Calculate the OLS parameters
                XX_b = np.dot(X_b.T, X_b)
                if np.linalg.det(XX_b) != 0:
                    XX_b_inv = np.linalg.inv(XX_b)
                else:
                    raise ValueError("X'X (bootstrap) is singular!")
                Xy_b = np.dot(X_b.T, y_b)
                betaB = np.dot(XX_b_inv, Xy_b)
                # Add them to boot_list
                bootList.append(betaB)
            else:
                # With cluster, we sample with replacement each cluster
                # and then use all observations inside the cluster.
                # See Cameron & Triverdi (2005), p.708 for an application
                # to panel data estimation.
                sampleIndx = [randint(0, numberClusters - 1)
                              for i in range(nSampling)]
                # Get the sample data
                chosen = list()
                for rndom in sampleIndx:
                    chosen.append(W[np.array(bCluster.iloc[:, rndom] == 1), :])
                chosenW = np.concatenate(chosen)
                # And now it is just as before
                X_b = chosenW[:, 1:]
                y_b = chosenW[:, 0]
                XX_b = np.dot(X_b.T, X_b)
                if np.linalg.det(XX_b) != 0:
                    XX_b_inv = np.linalg.inv(XX_b)
                else:
                    raise ValueError("X'X (bootstrap) is singular!")
                Xy_b = np.dot(X_b.T, y_b)
                betaB = np.dot(XX_b_inv, Xy_b)
                bootList.append(betaB)
        else:
            raise NotImplementedError('Sorry!')

    if statistic == 'se':
        # Variance-covariance estimate: average outer product of the
        # deviations from the bootstrap mean.
        # See Cameron & Triverdi (2005), p. 362 for details
        thetaHat = np.sum(bootList, axis=0) / float(repNumber)
        for i in range(repNumber):
            bootList[i].shape = (K, 1)
            thetaHat.shape = (K, 1)
            bootDiff = bootList[i] - thetaHat
            bootList[i] = np.dot(bootDiff, bootDiff.T)
        varCoVar = np.sum(bootList, axis=0) / float(repNumber - 1)

    # Returns result
    return varCoVar
    # This is the end.
UTF-8
Python
false
false
2,014
13,529,147,030,973
d21767f27f9bfbd03af60007bf95e11687ecd76f
764b7557e231db7ee89a4b304b9682fffbe27c7e
/ott/controller/services/model/response_base.py
bf24fe6e51b997baf64e10ec23259a2a8cfe83c7
[]
no_license
EarthShipSolution1/controller
https://github.com/EarthShipSolution1/controller
ad6b23c128b4c33613e3f1e0fc87ec9ca5485836
1fa5f42ab610677deb24a7bbb941d76b280003fc
refs/heads/master
2017-11-15T05:27:23.041851
2014-02-28T06:00:21
2014-02-28T06:00:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import simplejson as json import calendar, datetime class SerializerRegistry(object): ''' @see: http://stackoverflow.com/questions/4821940/how-to-make-simplejson-serializable-class this class will help serialize abitrary python objects into JSON (along with date / datetime handelling) ''' def __init__(self): self._classes = {} def add(self, cls): self._classes[cls.__module__, cls.__name__] = cls return cls def object_hook(self, dct): module, cls_name = dct.pop('__type__', (None, None)) if cls_name is not None: return self._classes[module, cls_name].from_dict(dct) else: return dct def default(self, obj): if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date): # simple / stupid handling of date and datetime object serialization return str(obj) else: dct = obj.to_dict() dct['__type__'] = [type(obj).__module__, type(obj).__name__] return dct registry = SerializerRegistry() @registry.add class ResponseBase(object): def __init__(self): self.status_code = '200' self.status_message = None self.has_errors = False def __repr__(self): return str(self.__dict__) def to_dict(self): return self.__dict__ @classmethod def from_dict(cls, dct): return cls(**dct) def to_json(self, pretty=False): if pretty: ret_val = json.dumps(self, default=registry.default, indent=4, sort_keys=True) else: ret_val = json.dumps(self, default=registry.default) return ret_val def from_json(self, str): return json.loads(str, object_hook=registry.object_hook) @classmethod def format_template_from_dict(cls, dict, template): ret_val = template try: ret_val = template.format(**dict) except: pass return ret_val class DatabaseNotFound(ResponseBase): def __init__(self): super(DatabaseNotFound, self).__init__() self.status_code = '404' self.status_message = 'Data Not Found' self.has_errors = True class ServerError(ResponseBase): def __init__(self): super(ServerError, self).__init__() self.status_code = '500' self.status_message = 'Internal Server Error' self.has_errors = True
UTF-8
Python
false
false
2,014
15,814,069,599,717
0cae2edbdad0067f14e126e85bfcb6859fea6e92
1c9c46879625e2ad79f13ec14dc3cfb4fe5611e8
/test/test_svm.py
1d53dbe9abee2873a096c69c2a8d1879621bc500
[ "Apache-2.0" ]
permissive
tangsengedu/cs224u
https://github.com/tangsengedu/cs224u
ed6ac91c171d59529401ffcb035b50567193b984
f6aa818761c95e5940af2d9597aa392479e3945c
refs/heads/master
2019-09-07T13:39:26.774461
2012-05-18T18:55:49
2012-05-18T18:55:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from learn import SVM, write_dataset import numpy import os import random import unittest class Test_svm(unittest.TestCase): """Test our rank svm implementation.""" def simpleTestGradesAndFeatures(self, num_grades): grades = range(num_grades) features = [] for grade in grades: r = num_grades * (1 - 2 * random.random()) features.append([float(grade) / 2 + r, float(grade) / 2 - r]) return grades, features def setUp(self): os.system('cd learn && make > /dev/null') def test_simple(self): test_data_filename = '/tmp/cs224u_features_test.dat' test_model_filename = '/tmp/cs224u_model_test.dat' test_scores_filename = '/tmp/cs224u_scores_test' num_grades = 10 grades, features = self.simpleTestGradesAndFeatures(num_grades) write_dataset(test_data_filename, features, grades) os.system('learn/rank_svm %s %s > /dev/null' %(test_data_filename, test_model_filename)) os.system('learn/rank_svm %s %s %s > /dev/null' %(test_data_filename, test_model_filename, test_scores_filename)) scores = [(ind, float(score)) for ind,score in enumerate(open(test_scores_filename).readlines())] scores.sort(key=lambda tup: tup[1]) ranking = [tup[0] for tup in scores] self.assertEqual(range(10), ranking) def test_svm_py(self): """Same as test_simple, but use the SVM class to drive the test.""" svm = SVM() num_grades = 15 grades, features = self.simpleTestGradesAndFeatures(num_grades) features = numpy.asarray(features) svm.train(features, grades) scores = [(ind, float(score)) for ind,score in enumerate(svm.classify_rank_svm(features))] scores.sort(key=lambda tup: tup[1]) ranking = [tup[0] for tup in scores] self.assertEqual(range(num_grades), ranking) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
2,012
523,986,012,950
b01c04545df2b7a809b8bd844df3965f48d6ab7e
177ab92e498e3fa0b1404bf783c0cbb437c0ac55
/tests/test_range.py
7a8efd75e5217cc7dc4564cedcc2a12dd769470e
[ "LGPL-2.1-only" ]
non_permissive
GNOME/testinggtk
https://github.com/GNOME/testinggtk
af60fad3a81e31783989ed5df9919940285168cb
4d32d592d4d2322fc140ab39a64391806cb3497b
refs/heads/master
2016-08-04T19:17:06.550366
2009-07-10T14:03:27
2009-07-10T14:03:27
4,614,853
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Tests for the ``gtk.Range`` class. ''' import gtk from gtk import gdk def test_default_properties(): range = gtk.HScrollbar() assert range.get_value() == 0.0 assert isinstance(range.get_adjustment(), gtk.Adjustment) def test_scroll_event_signal(): ''' Ensure that the ``change-value`` signal is emitted when the range receives a ``scroll-event``. :bug: #524203 ''' def sig_change_value(range, scroll_type, value): sig_change_value.called = True sig_change_value.called = False range = gtk.HScrollbar() range.connect('change-value', sig_change_value) ev = gdk.Event(gdk.SCROLL) ev.direction = gdk.SCROLL_DOWN gtk.Range.do_scroll_event(range, ev) assert sig_change_value.called def test_set_get_inverted(): range = gtk.HScrollbar() assert not range.get_inverted() range.set_inverted(True) assert range.get_inverted() range.set_inverted(False) assert not range.get_inverted() def test_destroy(): ''' Disabled due to segfault :bug: #551317 ''' # range = gtk.HScrollbar() # range.destroy() # range.set_lower_stepper_sensitivity(gtk.SENSITIVITY_ON)
UTF-8
Python
false
false
2,009
19,000,935,326,258
09f4d8cbde572f04b2b8b70c1891d47701e15759
1bd871343dd721b691033d8b886b1fd27e76cc75
/smart/views/preferences_store.py
6c89bfb9bf0b952219e445c3cf336438f7f96748
[ "Apache-2.0" ]
permissive
binbrain/smart_server
https://github.com/binbrain/smart_server
2610cec93933fcf646fe85615ac5c92f9ab48ea0
84094b6e5f812c6aacc656eaf52d4e6b8834f22c
refs/heads/master
2017-05-03T04:20:31.546079
2012-12-28T03:15:46
2012-12-28T03:15:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from smart.models import * from base import utils from django.http import HttpResponse from smart.models.ontology_url_patterns import CallMapper @CallMapper.register(client_method_name="put_user_preferences") def preferences_put (request, user_id, pha_email): try: ct = utils.get_content_type(request).lower().split(';')[0] if (not ct) or len(ct) == 0 or ct == "none": ct = "text/plain" except: ct = "text/plain" p = fetch_preferences (user_id, pha_email) p.data = request.raw_post_data p.mime = ct p.save() return HttpResponse(p.data, mimetype=p.mime) @CallMapper.register(client_method_name="get_user_preferences") def preferences_get (request, user_id, pha_email): p = fetch_preferences (user_id, pha_email) return HttpResponse(p.data, mimetype=p.mime) @CallMapper.register(client_method_name="delete_user_preferences") def preferences_delete(request, user_id, pha_email): p = fetch_preferences (user_id, pha_email) p.delete() return HttpResponse("ok") def resolve_account_pha (user_id, pha_email): account = None pha = None if user_id != None: try: account = Account.objects.get(email=user_id) except Account.DoesNotExist: pass if pha_email != None: try: pha = PHA.objects.get(email=pha_email) except PHA.DoesNotExist: pass return account, pha def fetch_preferences(user_id, pha_email): account, pha = resolve_account_pha (user_id, pha_email) return Preferences.objects.get_or_create(account=account, pha=pha, defaults={"data": "", "mime": "text/plain"})[0]
UTF-8
Python
false
false
2,012
10,625,749,130,887
f8f45a01428a591189e51828fc571a40176f8531
2d2e19d3145aa3d13c12b6b767b6018a1b9294c4
/florence/apps/cms_events/admin.py
4cf2029c467422d868cde26f2dcd2c9839154465
[]
no_license
baconz/florencegrendedotcom
https://github.com/baconz/florencegrendedotcom
9b98083dee9ca48740634fd88c7688f208891e04
6d40f30b4676a37dd423353d791f41055618a1fe
refs/heads/master
2020-04-05T15:38:00.807277
2012-10-21T23:49:44
2012-10-21T23:49:44
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin from florence.apps.cms_events.models import Event class EventAdmin(admin.ModelAdmin): fieldsets = [ (None, {'fields': ['title']}), (None, {'fields': ['event_date']}), ("Other Info", {'fields': ['description', 'address', 'url']}), ] admin.site.register(Event, EventAdmin)
UTF-8
Python
false
false
2,012
652,835,068,499
cec055ec489d5fe399bc054db461749c227f0d7a
e20d40836100d3cb28f1714e682ea1ff4415dd83
/sim.py
a419a07803a6d4907e18b787dbc5925f65b4b4f6
[ "GPL-2.0-only" ]
non_permissive
twoshanks/3dRoboExperiment
https://github.com/twoshanks/3dRoboExperiment
67eb3d6a110186dff5401a1ff64f230163780c4b
6748be35f631252aaafc99fcf1391627529b3fe5
refs/heads/master
2020-05-31T19:04:02.487743
2014-07-09T20:22:09
2014-07-09T20:22:09
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from visual import *
import Image, time, thread, collisiondetection
from math import *
from objects import *
from variables import *
import Tkinter as tk
#from usercode import usercode

'''
#################
Usercode Function
#################
'''

if SWARM_MODE == False:
    def usercode0():
        # Driver loop for robot R: find the nearest visible token marker,
        # steer toward it, grab it with the claw, then carry it away and
        # release it before searching again.
        seen=False          # an approach run has been made; token assumed at claw
        grabbed = False     # a token is currently held in the claw
        while True:
            templist = []
            markers = R.see()
            # Keep only token markers; drop arena-boundary markers.
            for m in markers:
                print m.marker_type
                if m.marker_type != "token arena":
                    templist.append(m)
            print templist
            markers = templist[:]
            # Closest marker first.
            markers = sorted(markers, key=lambda marker: marker.distance)
            print len(markers)
            if grabbed:
                # Carry sequence: pivot, drive forward, release the token,
                # back away, then turn to face a new direction.
                R.motors[0].speed = -10
                R.motors[1].speed = 10
                time.sleep(4)
                R.motors[0].speed = 70
                R.motors[1].speed = 70
                time.sleep(2)
                R.motors[0].speed = 0
                R.motors[1].speed = 0
                R.claw.openclaw(R.box.axis)
                time.sleep(1)
                R.motors[0].speed = -10
                R.motors[1].speed = -10
                time.sleep(3)
                R.motors[0].speed = 10
                R.motors[1].speed = -10
                time.sleep(3)
                grabbed = False
            elif len(markers)>0:
                # Steer toward the nearest marker based on its bearing (degrees).
                angle = markers[0].bearing.y
                if angle >10 and angle <30:
                    R.motors[0].speed = -10
                    R.motors[1].speed = 10
                    time.sleep(0.2)
                elif angle < -10 and angle > -30:
                    R.motors[0].speed = 20
                    R.motors[1].speed = -20
                    time.sleep(0.2)
                elif angle <10 and angle >-10:
                    # Roughly centred: drive in, sleep time scaled by distance.
                    # NOTE(review): uses m.distance (the LAST marker of the
                    # filtering loop above), not markers[0].distance --
                    # looks unintended; confirm before relying on this.
                    R.motors[0].speed = 30
                    R.motors[1].speed = 30
                    time.sleep(m.distance/3)
                    seen = True
            elif seen:
                # Marker dropped out of view after an approach: assume the
                # token is right at the claw; creep forward and close on it.
                R.motors[0].speed = 50
                R.motors[1].speed = 50
                time.sleep(1)
                R.motors[0].speed = 0
                R.motors[1].speed = 0
                R.claw.closeclaw(R.box.axis)
                seen=False
                grabbed = True
            else:
                # Nothing visible: spin in place to scan for markers.
                R.motors[0].speed = -10
                R.motors[1].speed = 10

    def usercode1():
        # Simple wander behaviour for robot S: reverse, then pivot.
        while True:
            markers = S.see()
            print len(markers)
            S.motors[0].speed = -50.0
            S.motors[1].speed = -50.0
            time.sleep(2)
            S.motors[0].speed = 50.0
            S.motors[1].speed = -50.0
            time.sleep(0.5)

    def usercode2():
        # Same wander behaviour for robot T.
        while True:
            markers = T.see()
            print len(markers)
            T.motors[0].speed = -50.0
            T.motors[1].speed = -50.0
            time.sleep(2)
            T.motors[0].speed = 50.0
            T.motors[1].speed = -50.0
            time.sleep(0.5)

    def usercode3():
        # Wander behaviour for robot U.
        # NOTE(review): calls S.see() but drives U's motors -- looks like a
        # copy/paste slip (presumably should be U.see()); confirm intent.
        while True:
            markers = S.see()
            print len(markers)
            U.motors[0].speed = -50.0
            U.motors[1].speed = -50.0
            time.sleep(2)
            U.motors[0].speed = 50.0
            U.motors[1].speed = -50.0
            time.sleep(0.5)

if SWARM_MODE == True:
    def usercode(number):
        # Swarm behaviour: robot `number` spins in place continuously.
        # NOTE(review): markers are taken from R.see(), not from
        # robot_list[number].see(), and the result is unused -- confirm
        # whether this is intentional.
        while True:
            robot_list[number].motors[0].speed = -50.0
            robot_list[number].motors[1].speed = 50.0
            markers = R.see()

'''
#############################
Movement update and collision
#############################
'''

if __name__ == "__main__":
    # Build the token objects (marker ids 41 .. 41+NUMBER_OF_TOKENS-1) and
    # collect each token's markers into the global marker_list.
    for x in xrange(41,41+NUMBER_OF_TOKENS):
        token_list.append(Token(x))
        print len(token_list)
        for thing in token_list[x-41].markers:
            marker_list.append(thing)

    if SWARM_MODE == False:
        # Single-robot mode: one robot R plus (commented-out) extras; the
        # user behaviour runs on its own thread while the main thread
        # drives the physics/render loop at RATE frames per second.
        R = Robot(0,15,0)
        #S = Robot(-150,15,-150)
        #T = Robot(150,15,-150)
        #U = Robot(-150,15,150)
        thread.start_new_thread(usercode0,())
        #thread.start_new_thread(usercode1,())
        #thread.start_new_thread(usercode2,())
        #thread.start_new_thread(usercode3,())
        while True:
            rate(RATE)
            R.update()
            #S.update()
            #T.update()
            #U.update()

    if SWARM_MODE == True:
        # Swarm mode: SWARM_NUMBER robots at random positions, one user
        # thread each, then the shared update loop.
        for x in xrange(SWARM_NUMBER):
            robot_list.append(Robot(random.randint(-150,150),15,random.randint(-150,150)))
        counter = 0
        while counter < SWARM_NUMBER:
            counter2 = counter
            thread.start_new_thread(usercode,(counter,))
            counter +=1
        while True:
            rate(RATE)
            for r in robot_list:
                r.update()
UTF-8
Python
false
false
2,014
10,402,410,819,015
8a80a4c757f47dc9e9fdf2a3d57b08d0a1c8e73e
a79439f5e421a2812444d808ad2eba91ac3666ed
/evaluation/eval_utils/result_tracker.py
f87435bcb4c05e18ffc97cf8f8e376756be8e6d0
[ "Apache-2.0" ]
permissive
rm/ad-eval
https://github.com/rm/ad-eval
42601783198d717f6a8db59ce9f09eaf7512741a
33418ecec8effba448c48c524667140b97151390
refs/heads/master
2021-01-15T10:06:40.133792
2013-03-09T13:39:32
2013-03-09T13:39:32
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import division
import numpy


class ResultTracker(object):
    """
    Keeps track of and displays test results.

    Supports limited aggregation and filtering of results.

    Note that while the aggregation is inefficient, this is not likely
    to be a problem except for huge tests.
    """

    def __init__(self, suite_label):
        self._suite_label = suite_label
        self._results = []                      # one dict per recorded run
        self._anomaly_detector_labels = set()   # distinct detector labels seen
        self._test_labels = set()               # distinct test labels seen

    def add_record(self, anomaly_detector_label, test_label,
                   execution_time=None, equal_support_distance=None,
                   full_support_distance=None, best_support_distance=None,
                   normalized_euclidean_distance=None, anomaly_vector=None):
        """Record the outcome of one (detector, test) run.

        A value of None for any measurement means 'not measured' and is
        excluded from filtering/aggregation.
        """
        self._anomaly_detector_labels.add(anomaly_detector_label)
        self._test_labels.add(test_label)
        self._results.append({
            'anomaly_detector': anomaly_detector_label,
            'test': test_label,
            'execution_time': execution_time,
            'equal_support_distance': equal_support_distance,
            'full_support_distance': full_support_distance,
            'best_support_distance': best_support_distance,
            'normalized_euclidean_distance': normalized_euclidean_distance,
            'anomaly_vector': anomaly_vector
        })

    def get_filtered_key_values(self, key, filter_predicate):
        """
        Returns the values of the field specified by key, in all entries,
        filtered by filter_predicate.

        Entries that do not contain the key (value is None) are ignored.
        """
        wrapped_filter = lambda x: x[key] is not None and filter_predicate(x)
        return [x[key] for x in filter(wrapped_filter, self._results)]

    def get_filtered_sum_over_key(self, key, filter_predicate):
        """
        Returns the sum of the field specified by key, in all elements,
        filtered by filter_predicate.
        """
        return sum(self.get_filtered_key_values(key, filter_predicate))

    def get_filtered_avg_over_key(self, key, filter_predicate):
        """
        Returns the average of the field specified by key, in all elements,
        filtered by filter_predicate, or None when no entry matches.
        """
        values = self.get_filtered_key_values(key, filter_predicate)
        if len(values) == 0:
            return None
        else:
            return sum(values) / len(values)

    def get_anomaly_detector_averages(self, ad_labels, key):
        """
        Returns a list of the average value of 'key' over the anomaly
        detector labels in ad_labels.
        """
        # Bind each label via a default argument so every filter captures its
        # own value. The previous implementation defined the lambda over a
        # free variable `l` before the comprehension ran; that only worked by
        # accident through Python 2 list-comprehension scope leakage and
        # raises NameError on Python 3.
        return [self.get_filtered_avg_over_key(
                    key, lambda x, label=label: x['anomaly_detector'] == label)
                for label in ad_labels]

    def print_results(self):
        """Pretty-print per-test and per-detector aggregates to stdout."""
        self._print_header()
        for ad_label in self._anomaly_detector_labels:
            self._print_anomaly_detection_header(ad_label)
            for test_label in self._test_labels:
                self._print_test_results(ad_label, test_label)
            self._print_anomaly_detection_totals(ad_label)

    def _print_header(self):
        print("\n\nResults for test suite '%s':" % self._suite_label)

    def _print_anomaly_detection_header(self, label):
        print("\n\tAnomaly detector '%s':" % label)

    def _print_test_results(self, ad_label, test_label):
        print("\n\t\tTest '%s':" % test_label)
        relevance_filter = lambda x: x['anomaly_detector'] == ad_label and x['test'] == test_label
        self._print_test_details(relevance_filter)

    def _print_anomaly_detection_totals(self, ad_label):
        print("\n\t\tTotal:")
        relevance_filter = (lambda x: x['anomaly_detector'] == ad_label)
        self._print_test_details(relevance_filter)

    def _print_test_details(self, relevance_filter):
        # Each aggregate is only printed when at least one matching entry
        # had the corresponding field set.
        total_execution_time = self.get_filtered_sum_over_key('execution_time', relevance_filter)
        avg_equal_support = self.get_filtered_avg_over_key('equal_support_distance', relevance_filter)
        avg_full_support = self.get_filtered_avg_over_key('full_support_distance', relevance_filter)
        avg_best_support = self.get_filtered_avg_over_key('best_support_distance', relevance_filter)
        avg_euclidean = self.get_filtered_avg_over_key('normalized_euclidean_distance', relevance_filter)

        if total_execution_time is not None:
            print("\t\t\tTotal execution time (s): %.2f" % total_execution_time)
        if avg_equal_support is not None:
            print("\t\t\tAverage equal support distance: %.3f" % avg_equal_support)
        if avg_full_support is not None:
            print("\t\t\tAverage full support distance: %.3f" % avg_full_support)
        if avg_best_support is not None:
            print("\t\t\tAverage best support distance: %.3f" % avg_best_support)
        if avg_euclidean is not None:
            print("\t\t\tAverage normalized Euclidean distance: %.3f" % avg_euclidean)
UTF-8
Python
false
false
2,013
16,295,105,923,366
a4259dfdd0dab7c66559919e97e70b82a597fdde
51a94a0a04ff69d68108f6a9f659ff7a0076b629
/domaintools/exceptions.py
cbcb1e3cb110dfbda0c250f00e6a013780924d52
[ "MIT" ]
permissive
DomainTools/api-python
https://github.com/DomainTools/api-python
bdfa88627edf17bb8a010c2b3377bf946c5efdc9
267db5a42a711b0704f2cf87aae8d7cfb8379b5c
refs/heads/master
2018-12-31T19:37:54.846537
2012-07-17T10:45:39
2012-07-17T10:45:39
2,570,132
8
5
MIT
false
2019-03-20T12:35:49
2011-10-13T15:10:47
2018-10-07T17:44:55
2016-04-14T00:31:36
158
17
4
5
Python
false
null
""" This file is part of the domaintoolsAPI_python_wrapper package. For the full copyright and license information, please view the LICENSE file that was distributed with this source code. """ class ServiceException(Exception): INVALID_CONFIG_PATH = "Config file do not exist"; UNKNOWN_SERVICE_NAME = "Unknown service name"; EMPTY_API_KEY = "Empty API key"; EMPTY_API_USERNAME = "Empty API username"; UNKNOWN_RETURN_TYPE = "Unknown return type. (json or xml or html required)"; INVALID_DOMAIN = "Domain/Ip invalide"; INVALID_OPTIONS = "Invalid options; options must be an array"; TRANSPORT_NOT_FOUND = "Transport not found; it must refer to a class that extends RESTService"; DOMAIN_CALL_REQUIRED = "Domain is required for this service"; IP_CALL_REQUIRED = "Ip address is required for this service"; EMPTY_CALL_REQUIRED = "No domain or ip is required for this service"; INVALID_REQUEST_OBJECT = "Invalid object; DomaintoolsAPI instance required"; INVALID_JSON_STRING = "Invalid json string; a valid one is required"; class BadRequestException(ServiceException): pass class InternalServerErrorException(ServiceException): pass class NotAuthorizedException(ServiceException): pass class NotFoundException(ServiceException): pass class ServiceUnavailableException(ServiceException): pass
UTF-8
Python
false
false
2,012
15,839,839,403,329
f3bacef5a38d0175f8703a44c306c51a5f8f7ad2
cc93c10575a7bb54911bc0079b58a24cfc647fce
/src/simulation.py
58b990bd3561bb5fb3b9d98c1ee94693cbab0948
[]
no_license
tools4origins/simElec
https://github.com/tools4origins/simElec
c6b59f4c70bbff842374b0fa2ea934e9d297d563
1cd1b3f83c01bfed8ed289b9bb07d7434b20e247
refs/heads/master
2021-01-23T22:06:09.750626
2014-08-26T20:38:00
2014-08-26T20:38:00
22,862,245
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from sympy import Eq, solve
from src.circuit import *
from src.theorems import *


class Simulation:
    """Runs an electrical simulation over a circuit: collects unknowns,
    builds the equation system from the registered theorems, and solves it.

    The constructor aborts early (with a warning) when the circuit has no
    wires or no components.
    """

    def __init__(self, circuit):
        print("Simulation en cours")
        self.circuit = circuit
        if len(circuit.wires) == 0:
            print("Attention : Aucun fil trouvé, simulation annulée")
            return
        if len(circuit.components) == 0:
            print("Attention : Aucun composant trouvé, simulation annulée")
            return
        self.list_unknowns()
        self.establish_equations()
        self.solve_equations()

    def establish_equations(self):
        """Apply every supporting theorem to its targets (wires or
        components) and pin the first wire's potential to zero as the
        reference."""
        self.equations = []
        for theorem in theorem_list:
            # Pick the collection this theorem operates on.
            if theorem.target_class == Wire:
                targets = self.circuit.wires
            elif theorem.target_class == Component:
                targets = self.circuit.components
            else:
                continue
            for target in targets:
                if theorem.support(target):
                    self.equations += theorem.apply(target)
        # Reference potential: ground the first wire.
        self.equations += [Eq(self.circuit.wires[0].symbol, 0)]
        print("Équations établies : ", self.equations)

    def list_unknowns(self):
        """Collect one unknown symbol per wire of the circuit."""
        self.unknowns = [wire.symbol for wire in self.circuit.wires]
        print("Inconnues listées :", self.unknowns)

    def solve_equations(self):
        """Solve the assembled system and print the solution."""
        print(solve(self.equations, self.unknowns))
UTF-8
Python
false
false
2,014
8,521,215,153,225
5af78d38d1810d19eb4804464735d6d5bdfcf4fa
7040963cf58d7a75cb214d057b2a2104a2eadb19
/app/modules/session.py
8bbc72cea566b9b9f6caf6a55e574885e237495a
[]
no_license
bjornua/lanbot
https://github.com/bjornua/lanbot
79866fa20f852e7c083436036436f4c9afe6fb8d
911f3d3d0a2c26389c0fb1ae4b339112689f7883
refs/heads/master
2020-06-08T01:29:59.252730
2011-06-14T22:52:40
2011-06-14T22:52:40
1,251,952
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
from app.modules.db import db


class Session(object):
    """Lazily-loaded session store keyed by (nick, host), backed by a
    CouchDB 'session/by_sender' view."""

    def __init__(self, nick, host):
        self.nick = nick
        self.host = host
        self.is_init = False    # backing document not yet loaded/created
        self.changed = False

    def init(self):
        """Load or create the backing document on first use (idempotent)."""
        if self.is_init:
            return
        self.is_init = True
        if not self.load_session():
            self.new_session()

    def load_session(self):
        """Fetch an existing session document for (nick, host).

        Returns True when a document was found (and stored on self.doc),
        False otherwise.
        """
        rows = db().view("session/by_sender", include_docs=True)[self.nick, self.host]
        for match in rows:
            # First row wins; the view key identifies the session.
            self.doc = match.doc
            return True
        return False

    def new_session(self):
        """Build a fresh, not-yet-saved session document."""
        self.doc = {
            "type": "session",
            "nickname": self.nick,
            "host": self.host,
            "data": {},
        }

    def save(self):
        """Persist the document; no-op when nothing was ever loaded."""
        if not self.is_init:
            return
        self.doc["_id"], self.doc["_rev"] = db().save(self.doc)

    def get(self, *args, **kwargs):
        """dict.get over the session's data payload (loads on demand)."""
        self.init()
        return self.doc["data"].get(*args, **kwargs)

    def __setitem__(self, *args, **kwargs):
        """Item assignment into the session's data payload (loads on demand)."""
        self.init()
        return self.doc["data"].__setitem__(*args, **kwargs)
UTF-8
Python
false
false
2,011
9,483,287,825,720
b0283ae43d2d3f3c98f8c8354bfe619153657374
0c6261b5beebde9d0ae09a9d34ecf9e73cf10988
/hunger/email.py
8edc85362e7aa2fc00394dd6d49521b75e846882
[ "MIT", "BSD-2-Clause" ]
permissive
SpazioDati/django-hunger
https://github.com/SpazioDati/django-hunger
64296f994fb1958a88d8c0bb14b00e962c46d083
14ef1b31b7d5d74acc03753acdf6e434350d566e
refs/heads/master
2020-12-24T23:29:00.997834
2013-07-10T07:56:54
2013-07-10T07:56:54
7,501,106
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os.path
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.template.loader import get_template, render_to_string
from django.template import Context
from hunger.utils import setting, MandrillMail

try:
    from templated_email import send_templated_mail
    templated_email_available = True
except ImportError:
    templated_email_available = False


def beta_confirm(email, **kwargs):
    """
    Send out email confirming that they requested an invite.

    All keyword arguments are forwarded into the template context;
    'from_email' overrides the DEFAULT_FROM_EMAIL setting.
    """
    templates_folder = setting('BETA_EMAIL_TEMPLATES_DIR', 'hunger')
    templates_folder = os.path.join(templates_folder, '')
    from_email = kwargs.get('from_email', setting("DEFAULT_FROM_EMAIL"))
    # NOTE(review): templates_folder and file_extension are computed but
    # never used below -- the template name is hard-coded. Looks like dead
    # code left from a templated_email-based implementation; confirm before
    # removing.
    if templates_folder == 'hunger':
        file_extension = 'email'
    else:
        file_extension = None
    context_dict = kwargs.copy()
    MandrillMail('hunger/beta_confirm.email', context=context_dict).send(
        from_email=from_email,
        recipient_list=[email],
    )


def beta_invite(email, code, request, **kwargs):
    """
    Email for sending out the invitation code to the user.

    Invitation URL is added to the context, so it can be rendered with
    standard django template engine. When 'custom_message' is supplied it
    is formatted with the invite URL and sent as the HTML body (with a
    plain-text version derived from it).
    """
    context_dict = kwargs.copy()
    # Build an absolute invite URL unless the caller supplied one.
    context_dict.setdefault(
        "invite_url",
        request.build_absolute_uri(reverse("beta_verify_invite", args=[code]))
    )
    # NOTE(review): `context` is built but never used -- MandrillMail gets
    # the raw dict. Presumably a leftover; confirm before removing.
    context = Context(context_dict)
    from_email = kwargs.get('from_email', setting("DEFAULT_FROM_EMAIL"))
    html_content = text_content = None
    if kwargs.get('custom_message'):
        from hunger.utils import html2plain
        html_content = kwargs.get('custom_message').format(invite_url=context_dict['invite_url'])
        text_content = html2plain(html_content)
    MandrillMail('hunger/beta_invite.email', context=context_dict).send(
        from_email=from_email,
        recipient_list=[email],
        html=html_content,
        fulltext=text_content,
    )
UTF-8
Python
false
false
2,013
6,305,012,028,334
a42c5423c4b062f5dac32a70851bcbdf27d4f0e0
0448f3411cafb60156480df2422f524d9ba19fc4
/plugins/unittests/enumeration/deepWebDirBruterPluginTest.py
d39cd214def6f1e9fcb004c86b901310aa0e60d9
[]
no_license
Trietptm-on-Security/Tortazo
https://github.com/Trietptm-on-Security/Tortazo
24a65c4a259cf669604b897a1bedb0324d617550
efba01d7bc6f8b1e0adddbd3673ad92d591ee3c5
refs/heads/master
2017-11-28T18:10:22.630285
2014-12-01T09:20:53
2014-12-01T09:20:53
55,591,714
0
1
null
true
2016-04-06T09:09:13
2016-04-06T09:09:13
2016-03-10T14:39:48
2015-05-27T22:11:25
568,046
0
0
0
null
null
null
# coding=utf-8
'''
Created on 22/01/2014

#Author: Adastra.
#twitter: @jdaanial

deepWebDirBruterPluginTest.py

deepWebDirBruterPluginTest is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.

deepWebDirBruterPluginTest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Tortazo; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import sys
import os.path
# Make the project root importable when this test is run directly.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
import os
import sys
import unittest
from plugins.enumeration.deepWebDirBruterPlugin import deepWebDirBruterPlugin
from config import unittests
from config import config
from core.tortazo.exceptions.PluginException import PluginException


class deepWebDirBruterPluginTest(unittest.TestCase):
    # Each test feeds the dir-bruter plugin an invalid target (or None)
    # and expects a PluginException rather than a crash or silent pass.

    def setUp(self):
        # Fresh plugin per test, configured with the SOCKS proxy settings
        # from the project config and empty plugin arguments.
        self.plugin = deepWebDirBruterPlugin()
        self.pluginArgs = []
        self.plugin.serviceConnector.setSocksProxySettings(config.socksHost, config.socksPort)
        self.plugin.setPluginArguments(self.pluginArgs)
        self.plugin.processPluginArguments()

    def test_dirBruterOnRelay(self):
        # Invalid site URL and None must both raise PluginException.
        print "Testing dirBruterOnRelay with args: site=%s " %(unittests.dirBruter_urlSite)
        self.assertRaises(PluginException, self.plugin.dirBruterOnRelay, site=unittests.dirBruter_urlSite)
        print "Testing dirBruterOnRelay with args: site=%s " %(None)
        self.assertRaises(PluginException, self.plugin.dirBruterOnRelay, site=None)

    def test_dirBruterOnAllRelays(self):
        # Invalid port and None must both raise PluginException.
        print "Testing dirBruterOnRelay with args: port=%s " %(str(unittests.dirBruter_portInvalid))
        self.assertRaises(PluginException, self.plugin.dirBruterOnAllRelays, port=unittests.dirBruter_portInvalid)
        print "Testing dirBruterOnRelay with args: port=%s " %(None)
        self.assertRaises(PluginException, self.plugin.dirBruterOnAllRelays, port=None)

    def test_dirBruterOnHiddenService(self):
        # Invalid onion address and None must both raise PluginException.
        print "Testing dirBruterOnHiddenService with args: hiddenService=%s " %(unittests.dirBruter_onionserviceInvalid)
        self.assertRaises(PluginException, self.plugin.dirBruterOnHiddenService, hiddenService=unittests.dirBruter_onionserviceInvalid)
        print "Testing dirBruterOnHiddenService with args: hiddenService=%s " %(None)
        self.assertRaises(PluginException, self.plugin.dirBruterOnHiddenService, hiddenService=None)


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
2,014
446,676,600,781
29ada3a19aaa113e158087e342a0118b06dc09d0
cefd6c17774b5c94240d57adccef57d9bba4a2e9
/WebKit/Tools/Scripts/webkitpy/performance_tests/perftest.py
5a2f04322981a159adfa9507847a3c162339464a
[ "BSL-1.0" ]
permissive
adzhou/oragle
https://github.com/adzhou/oragle
9c054c25b24ff0a65cb9639bafd02aac2bcdce8b
5442d418b87d0da161429ffa5cb83777e9b38e4d
refs/heads/master
2022-11-01T05:04:59.368831
2014-03-12T15:50:08
2014-03-12T15:50:08
17,238,063
0
1
BSL-1.0
false
2022-10-18T04:23:53
2014-02-27T05:39:44
2014-03-16T16:37:05
2014-03-16T16:37:07
270,876
0
1
1
C++
false
false
# Copyright (C) 2012, 2013 Apple Inc. All rights reserved. # Copyright (C) 2012, 2013 Google Inc. All rights reserved. # Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import errno
import logging
import math
import re
import os
import signal
import socket
import subprocess
import sys
import time

from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.port.driver import DriverInput
from webkitpy.port.driver import DriverOutput

DEFAULT_TEST_RUNNER_COUNT = 4

_log = logging.getLogger(__name__)


class PerfTestMetric(object):
    # One measured metric of a performance test (e.g. Time, Malloc,
    # JSHeap), accumulating the raw values of every iteration group.

    def __init__(self, path, test_file_name, metric, unit=None, aggregator=None, iterations=None):
        # FIXME: Fix runner.js to report correct metric names
        self._iterations = iterations or []
        self._unit = unit or self.metric_to_unit(metric)
        self._aggregator = aggregator
        # 'Time' is refined into FrameRate/Runs/Time depending on the unit.
        self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
        self._path = path
        self._test_file_name = test_file_name

    def name(self):
        return self._metric

    def aggregator(self):
        return self._aggregator

    def path(self):
        return self._path

    def test_file_name(self):
        return self._test_file_name

    def has_values(self):
        return bool(self._iterations)

    def append_group(self, group_values):
        # group_values: list of raw numbers produced by one test run.
        assert isinstance(group_values, list)
        self._iterations.append(group_values)

    def grouped_iteration_values(self):
        return self._iterations

    def flattened_iteration_values(self):
        return [value for group_values in self._iterations for value in group_values]

    def unit(self):
        return self._unit

    @staticmethod
    def metric_to_unit(metric):
        assert metric in ('Time', 'Malloc', 'JSHeap')
        return 'ms' if metric == 'Time' else 'bytes'

    @staticmethod
    def time_unit_to_metric(unit):
        return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]


class PerfTest(object):
    # Runs one performance test via a port driver (possibly several
    # times) and parses the metric lines the test prints.

    def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        self._port = port
        self._test_name = test_name
        self._test_path = test_path
        self._description = None
        # List of {'name': subtest_name, 'metrics': [PerfTestMetric, ...]}.
        self._metrics = []
        self._test_runner_count = test_runner_count

    def test_name(self):
        return self._test_name

    def test_name_without_file_extension(self):
        return re.sub(r'\.\w+$', '', self.test_name())

    def test_path(self):
        return self._test_path

    def description(self):
        return self._description

    def prepare(self, time_out_ms):
        # Hook for subclasses; the base test needs no preparation.
        return True

    def _create_driver(self):
        return self._port.create_driver(worker_number=0, no_timeout=True)

    def run(self, time_out_ms):
        # Run the test _test_runner_count times with a fresh driver per
        # run; returns the collected PerfTestMetric list, or None when any
        # run fails.
        for _ in xrange(self._test_runner_count):
            driver = self._create_driver()
            try:
                if not self._run_with_driver(driver, time_out_ms):
                    return None
            finally:
                driver.stop()

        should_log = not self._port.get_option('profile')
        if should_log and self._description:
            _log.info('DESCRIPTION: %s' % self._description)

        results = []
        for subtest in self._metrics:
            for metric in subtest['metrics']:
                results.append(metric)
                # Only whole-test (unnamed-subtest) metrics are logged in the
                # legacy bot-compatible format.
                if should_log and not subtest['name']:
                    legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
                    self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
                        metric.flattened_iteration_values(), metric.unit())

        return results

    @staticmethod
    def log_statistics(test_name, values, unit):
        sorted_values = sorted(values)

        # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
        square_sum = 0
        mean = 0
        for i, time in enumerate(sorted_values):
            delta = time - mean
            sweep = i + 1.0
            mean += delta / sweep
            square_sum += delta * (time - mean)

        middle = int(len(sorted_values) / 2)
        mean = sum(sorted_values) / len(values)
        median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2
        stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0

        _log.info('RESULT %s= %s %s' % (test_name, mean, unit))
        _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s' %
            (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit))

    # Matches 'Description: ...' lines in the test output.
    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
    # Matches metric lines such as 'subtest:Time -> [1, 2, 3] ms'.
    _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+?)?:(?P<metric>[A-Z][A-Za-z]+)(:(?P<aggregator>[A-Z][A-Za-z]+))? -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')

    def _run_with_driver(self, driver, time_out_ms):
        # One driver run: execute the test, strip ignorable output lines,
        # then parse every remaining line as a description or metric.
        output = self.run_single(driver, self.test_path(), time_out_ms)
        self._filter_output(output)
        if self.run_failed(output):
            return False

        current_metric = None
        for line in re.split('\n', output.text):
            description_match = self._description_regex.match(line)
            if description_match:
                self._description = description_match.group('description')
                continue

            metric_match = self._metrics_regex.match(line)
            if not metric_match:
                # Any unrecognized line is treated as a test failure.
                _log.error('ERROR: ' + line)
                return False

            metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit'), metric_match.group('aggregator'))
            metric.append_group(map(lambda value: float(value), metric_match.group('values').split(', ')))

        return True

    def _ensure_metrics(self, metric_name, subtest_name='', unit=None, aggregator=None):
        # Find or create the (subtest, metric) bucket values get appended to.
        try:
            subtest = next(subtest for subtest in self._metrics if subtest['name'] == subtest_name)
        except StopIteration:
            subtest = {'name': subtest_name, 'metrics': []}
            self._metrics.append(subtest)

        try:
            return next(metric for metric in subtest['metrics'] if metric.name() == metric_name)
        except StopIteration:
            path = self.test_name_without_file_extension().split('/')
            if subtest_name:
                path += subtest_name.split('/')
            metric = PerfTestMetric(path, self._test_name, metric_name, unit, aggregator)
            subtest['metrics'].append(metric)
            return metric

    def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
        return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)

    def run_failed(self, output):
        # Returns True when the run failed (no text, error output, timeout,
        # or crash); errors are logged with their stderr text.
        if output.text == None or output.error:
            pass
        elif output.timeout:
            _log.error('timeout: %s' % self.test_name())
        elif output.crash:
            _log.error('crash: %s' % self.test_name())
        else:
            return False

        if output.error:
            _log.error('error: %s\n%s' % (self.test_name(), output.error))

        return True

    @staticmethod
    def _should_ignore_line(regexps, line):
        # Empty lines and lines matching any of the given patterns are
        # dropped before parsing.
        if not line:
            return True
        for regexp in regexps:
            if regexp.search(line):
                return True
        return False

    _lines_to_ignore_in_parser_result = [
        # Following are for handle existing test like Dromaeo
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
        # Following is for html5.html
        re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/""")),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
        # DoYouEvenBench
        re.compile(re.escape("CONSOLE MESSAGE: line 140: Miss the info bar? Run TodoMVC from a server to avoid a cross-origin error.")),
        re.compile(re.escape("CONSOLE MESSAGE: line 315: TypeError: Attempted to assign to readonly property.")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEBUG: -------------------------------")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEBUG: Ember : 1.3.1")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEBUG: Ember Data : 1.0.0-beta.6")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEBUG: Handlebars : 1.3.0")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEBUG: jQuery : 2.1.0")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEPRECATION: Namespaces should not begin with lowercase")),
        re.compile(re.escape("[email protected]:2:40")),
        re.compile(re.escape("[email protected]:3380:17")),
        re.compile(re.escape("CONSOLE MESSAGE: line 124: Booting in DEBUG mode")),
        re.compile(re.escape("CONSOLE MESSAGE: line 125: You can configure event logging with DEBUG.events.logAll()/logNone()/logByName()/logByAction()")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: Ember Views require jQuery 1.7, 1.8, 1.9, 1.10, or 2.0")),
        re.compile(re.escape("CONSOLE MESSAGE: line 3285: DEPRECATION: Namespaces should not begin with lowercase.")),
    ]

    def _filter_output(self, output):
        # Drop known-noise lines (see list above) from the driver output.
        if output.text:
            output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])


class SingleProcessPerfTest(PerfTest):
    # Variant that always uses a single runner process (e.g. Dromaeo).
    def __init__(self, port, test_name, test_path, test_runner_count=1):
        super(SingleProcessPerfTest, self).__init__(port, test_name, test_path, test_runner_count)


class PerfTestFactory(object):
    # Maps test-name patterns to specialized PerfTest subclasses.
    _pattern_map = [
        (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
    ]

    @classmethod
    def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        for (pattern, test_class) in cls._pattern_map:
            if pattern.match(test_name):
                return test_class(port, test_name, path, test_runner_count)

        return PerfTest(port, test_name, path, test_runner_count)
UTF-8
Python
false
false
2,014
10,093,173,172,957
fe4f3899a3a7b75256b8d2b92bacf57d11486cfb
82539e992b748bcc8023f20b16aac0662dcd52aa
/bdd-docs/tan_xml_files_parser.py
acc64cbd05a6dd303a7071fdaffddeb7877c6d7c
[]
no_license
XioNoX/nod
https://github.com/XioNoX/nod
3d63d69271dcce9ba4c9bd8e844afbcc35c4a97d
b1c2a9ddc13e649802690be3521d770201e23eae
refs/heads/master
2020-04-09T03:52:18.983377
2012-06-22T17:15:37
2012-06-22T17:15:37
3,638,186
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import codecs
import os
import re
import sys

# Bug fix: `re` used to be imported inside the cElementTree try-block, so a
# cElementTree ImportError silently left `re` unimported and the parser died
# with a NameError on first use.
try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET


def parse_xml_files_of(directory):
    """Parse every XML file found in `directory`.

    One SQL data file per input is written into the 'parser_result'
    directory (created on demand).
    """
    directory_name = 'parser_result'
    if not os.path.exists(directory_name):
        os.makedirs(directory_name)
    for xmlfile in os.listdir(directory):
        create_filedata_for_stations(directory + '/' + xmlfile, directory_name)
        print('File : ' + xmlfile + ' done...')


def create_filedata_for_stations(xml_filename, directory_name):
    """Extract the stop points of one TAN-line XML file into SQL.

    The output file `<directory_name>/data_<line>.sql` contains one INSERT
    statement per distinct consecutive stop. The line number is taken from
    the file name, which must contain the substring '_TAN<number>'.
    """
    # Get the number of the tan line from the file name.
    m = re.search(r'(?<=_TAN)\w+', xml_filename)
    linenumber = m.group(0)
    datafile = codecs.open(directory_name + '/data_' + linenumber + '.sql', 'w', 'utf-8')
    try:
        datafile.write('# line to insert in database for table \'tan_arrets\' has the following structure : line number - station name - longitude - latitude')
        datafile.write('\n\n')
        tree = ET.ElementTree(file=xml_filename)
        ancient_station_name = ''
        for elem in tree.iter(tag='StopPoint'):
            station_name = elem.findtext('name')
            station_longitude = elem.findtext('longitude')
            station_latitude = elem.findtext('latitude')
            # Consecutive duplicate stop names collapse into a single row.
            if ancient_station_name != station_name:
                datafile.write('INSERT INTO `tan_stops`(`number_of_line`, `name_stop`, `longitude`, `latitude`) VALUES (' + linenumber + ',"' + station_name + '",' + station_longitude + ',' + station_latitude + ');\n')
                ancient_station_name = station_name
    finally:
        # Bug fix: the file handle was never closed before.
        datafile.close()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage : python tan_xml_file_parser directory containing xml files')
    elif len(sys.argv) == 2:
        parse_xml_files_of(sys.argv[1])
UTF-8
Python
false
false
2,012
6,133,213,319,234
36c4fbd5c19ffbc228cef6a5262cf6fdcf378bd6
0752bbbe8faa1913edc3c7538c963137bbeeab68
/lab2/dbms/views.py
2053afcb0486cf29bf32ebc7874e8b9eff20d684
[]
no_license
mrnethen/lab2
https://github.com/mrnethen/lab2
31ec10385a0bd475df11cf2b7d3c4da63e302893
9858330b1dffc8c305c4244fea5e9b822b01e865
refs/heads/master
2016-09-05T11:32:25.116198
2014-12-06T11:27:07
2014-12-06T11:27:07
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from os import listdir, path, getcwd
import ast
import time
import json

from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.core.cache import cache

from dbms.lab.database import Database
from dbms.lab.lab import String255, Enumeration, StringInvl

from rest_framework import views
from rest_framework.decorators import api_view
from rest_framework.response import Response


def _get_table(db, table_name):
    """Return the table named `table_name` from `db`, or None.

    Mirrors the original lookup semantics used throughout this module:
    if several tables share a name, the last one wins.
    """
    table = None
    for candidate in db.tables:
        if candidate.name == table_name:
            table = candidate
    return table


@api_view(['GET', 'POST'])
def home(request):
    """List all databases on disk as JSON ({'database_list': [...]})."""
    cache.clear()  # stale cache entries would hide freshly created databases
    database_list = listdir("dbms/lab/dbs")
    return HttpResponse(json.dumps({'database_list': database_list}),
                        content_type="application/json")


def load_db(name):
    """Load the Database stored in dbms/lab/dbs/<name> and return it."""
    # Renamed local (was `path`) so it no longer shadows os.path imported above.
    db_path = "dbms/lab/dbs/" + name
    db = Database(name)
    db.load(db_path)
    return db


@api_view(['GET'])
def load_database(request, name):
    """Return the table names of database `name` as JSON."""
    db = load_db(name)
    return HttpResponse(json.dumps({'tables_names': db.tables_names}))


@api_view(['GET'])
def show_table(request, db_name, table_name):
    """Dump a single table of database `db_name` as JSON."""
    db = load_db(db_name)
    table = _get_table(db, table_name)
    return dump_table(request, db_name, table)


def date_handler(obj):
    """`json.dumps` default= helper: serialize date-likes via isoformat()."""
    return obj.isoformat() if hasattr(obj, 'isoformat') else obj


def dump_table(request, db_name, table):
    """Serialize `table` (schema and stringified rows) into a JSON response."""
    schema = {}
    rows = {}
    for i, row in enumerate(table.schema):
        if not row[0] == "Enumeration":
            schema.update({str(i): {"type": row[0], "name": row[1]}})
        else:
            # Enumeration columns additionally expose their allowed values.
            schema.update({str(i): {"type": row[0],
                                    "name": row[1],
                                    "values": row[2]}})
    for i, row in enumerate(table.rows):
        col_row = []
        for col in table.rows[i]:
            col_row += [str(col)]
        rows.update({str(i): {"values": col_row}})
    return HttpResponse(json.dumps({
        "db_name": db_name,
        "table_name": table.name,
        "schema": schema,
        "rows": rows
    }))


def add_row(request, db_name, table_name):
    """Append an empty row to the table, persist it, and bounce back."""
    db = load_db(db_name)
    table = _get_table(db, table_name)
    table.addRow()
    table.save(settings.BASE_DIR + "/dbms/lab/dbs/%s/" % db_name)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))


def delete_row(request, db_name, table_name, row_number):
    """Delete the 1-based `row_number` from the table and persist it."""
    db = load_db(db_name)
    table = _get_table(db, table_name)
    table.deleteRow(int(row_number) - 1)
    table.save(settings.BASE_DIR + "/dbms/lab/dbs/%s/" % db_name)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))


def add_table(request, name):
    """Render the 'create table' form for database `name`."""
    return render_to_response("new_table.html", {"db_name": name})


@api_view(['GET'])
def sort_column(request, db_name, table_name, column_number):
    """Sort the table by `column_number`, persist it, and dump it as JSON."""
    db = load_db(db_name)
    table = _get_table(db, table_name)
    table.sort(int(column_number))
    table.save(settings.BASE_DIR + "/dbms/lab/dbs/%s/" % db_name)
    return dump_table(request, db_name, table)


@api_view(['GET'])
def join_tables(request, db_name, table_name1, table_name2):
    """Join two tables, reload the database, and dump the joined table."""
    db = load_db(db_name)
    table_name = db.join(table_name1 + ".json", table_name2 + ".json")
    db = load_db(db_name)  # reload so the freshly written join is visible
    table = _get_table(db, table_name)
    return dump_table(request, db_name, table)
UTF-8
Python
false
false
2,014
309,237,673,988
ec3a5ab20e85f621eb42470a061af426ef361a9e
a04b58a448aba6c8bd85c73712295a2593e3097b
/foiamachine/apps/mail/urls.py
5bf7efcdeb79133b14f10e9c96714a1d85c6e947
[ "MIT" ]
permissive
datadesk/foiamachine
https://github.com/datadesk/foiamachine
4dace3eccc1a143bb86c2826ddefacabdac72f4b
ea2c4660637493406d839f453ad450ec092a8e74
refs/heads/master
2017-10-07T13:52:46.095015
2014-11-20T00:45:32
2014-11-20T00:45:32
26,888,239
4
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls.defaults import *
from apps.mail.views import (
    MailRequestListView,
    MailBoxMailListView,
    MailDetailView,
    associate_message,
    s3_file_view,
)
from django.contrib.auth.decorators import login_required

# Routes for the mail app.  Views exposing user data are wrapped in
# login_required; the incoming/check endpoints are referenced by dotted
# path (old-style string views).
urlpatterns = patterns(
    '',
    # Hook for newly received messages.
    url(r'^incoming/$', 'apps.mail.views.new_msg', name='new_msg'),
    # Trigger a poll of the mail server.
    url(r'^check/$', 'apps.mail.views.check_mail', name='check_mail'),
    # The authenticated user's mailbox listing.
    url(r'^mailbox/$', login_required(MailBoxMailListView.as_view()),
        name='mailbox_mail_view'),
    # Messages not yet attached to a request; optional message id variant.
    url(r'^orphaned/$', associate_message, name='orphaned_mail_view'),
    url(r'^orphaned/(?P<message_id>.+)/$', associate_message,
        name='orphaned_mail_view'),
    # Single message detail.
    url(r'^detail/(?P<slug>.+)/$', login_required(MailDetailView.as_view()),
        name='mail_detail_view'),
    # All mail belonging to one FOIA request.
    url(r'^request/(?P<slug>.+)/$', login_required(MailRequestListView.as_view()),
        name='request_mail_view'),
    # S3-backed attachment download.
    url(r'^attachments/(?P<rpk>[\d]+)/(?P<pk>[\d]+)/$', s3_file_view,
        name='get_attachment_view'),
)
UTF-8
Python
false
false
2,014
4,011,499,463,699
20bf2e060c7387665a6dd15c79b5fa47cd601cea
5dfe1ac543c18530aba087555d67dcf9915aca91
/python_version/pypipa/widgets_master.py
def47f46868fb1e1726fe4335b42d08be972787d
[]
no_license
orakeldel/PyPiPa
https://github.com/orakeldel/PyPiPa
0a9c7847a7263690154a8ba436ac1d8836ac3e7d
27287802592aa8bea540fe407fc0c887f82aa5b8
refs/heads/master
2021-01-20T09:03:39.951745
2011-12-31T11:31:30
2011-12-31T11:31:30
3,079,181
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/python
"""
PyPiPa - Python Pirate Party.
Copyright (C) 2010 launchpad.net/pypipa <[email protected]>

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import pygame
from pygame.locals import *

# Dynamic import of the menu widget module (keeps the widgets package
# decoupled at module load time).
widgets = __import__("widgets.widget_menu", globals(), locals(), ['WidgetMenu'], -1)


class WidgetsMaster (object):
    """
    The WidgetsMaster handles 1-4 widgets on the screen.
    Widgets may be games or control elements.

    The widgets are orderd the following:
    1:|-----| 2:|-----| 3:|-----| 4:|-----|
      |#####|   |##|##|   |##|##|   |##|##|
      |#####|   |##|##|   |-----|   |-----|
      |#####|   |##|##|   |#####|   |##|##|
      |-----|   |-----|   |-----|   |-----|

    In case of 2 players, there will be one widgetmaster each:
    1:|-------| 2:|-------| 3:|--------| 4:|-------|
      |#######|   |###|###|   |##|##|##|   |#|#|#|#|
      |-------|   |-------|   |--------|   |-------|
    """

    def __init__ (self, screen, widgets_count = 1, widgets = None, is_multiplayer = False):
        """
        Constructor of WidgetsMaster

        @type screen: pygame.Surface
        @param screen: Surface to paint uppon
        @type widgets_count: int
        @param widgets_count: numbers of widgets to draw (default: 1)
        @type widgets: array
        @param widgets: array of widgets to use (size has to >= widgets_count),
            None to define later using set_widgets() (defualt: None)
        @type is_multiplayer: bool
        @param is_multiplayer: shall screen tile like multiplayer mode (defualt: false)
        """
        self.screen = screen
        self.widgets_count = widgets_count
        if self.widgets_count > 4:
            print ("ERROR: Widgets_count greater than 4: Resetting to 4.")
            self.widgets_count = 4
        self.widgets = widgets
        self.is_multiplayer = is_multiplayer
        self.create_screen_parts()
        # Bug fix: widgets=None is documented ("define later using
        # set_widgets()") but previously crashed here on iteration.
        if self.widgets is not None:
            i = 0
            for widget in self.widgets:
                widget.define_screen(self.screenparts[i])
                i += 1

    def change_widgets_count (self, new_widgets_count):
        """
        Changes the number of widgets

        @type new_widgets_count: int
        @param new_widgets_count: numbers of widgets to draw
        """
        if new_widgets_count > 4:
            print ("ERROR: new_widgets_count greater than 4: Resetting to 4.")
            # Bug fix: the message promised a reset to 4 but the old code
            # never assigned it (unlike __init__, which clamps).
            self.widgets_count = 4
        else:
            self.widgets_count = new_widgets_count

    def set_widgets (self, widget, widget_index = 0):
        """
        Sets the widgets to draw

        @type widget: widget
        @param widget: widget to be changed to
        @type widget_index: int
        @param widget_index: index of the widget to change (default: 0)
        """
        if widget_index < len(self.widgets):
            self.widgets[widget_index] = widget

    def draw (self):
        """
        Draws the widget on the screen
        """
        # Widgets 0/1 go on the top row, 2/3 on the bottom row.
        i = 0
        for widget in self.widgets:
            if i < 2:
                self.screen.blit(widget.get_surface(), (i*self.screen.get_width()/2,0))
            else:
                self.screen.blit(widget.get_surface(), ((i % 2)*self.screen.get_width()/2, self.screen.get_height()/2))
            i += 1

    def manage_interaction (self):
        """
        Manages interaction with the widgets (f.e. cursor key up pressed,...)
        """
        reactions = []
        for e in pygame.event.get():
            if e.type == QUIT:
                pygame.quit()
                return False
            if e.type == KEYDOWN:
                for widget in self.widgets:
                    reactions.append(widget.manage_interaction(e.key))
                if isinstance(self.widgets[0], widgets.WidgetMenu):
                    # The menu widget decides the next state itself.
                    return reactions[0]
                #TODO: Possibility of message return from modules
                reaction = True
                for react in reactions:
                    if react == False:
                        # A widget asked to leave: fall back to the menu.
                        reaction = "widgets.WidgetMenu"
                return reaction
        return True

    def create_screen_parts (self):
        """
        makes all needed screenparts
        """
        self.screenparts = []
        if self.widgets_count == 1:
            self.screenparts.append(pygame.Surface((self.screen.get_width(),self.screen.get_height())))
        elif self.widgets_count == 2:
            for i in range (0,2):
                self.screenparts.append(pygame.Surface(((self.screen.get_width()/2),self.screen.get_height())))
        elif self.widgets_count == 3:
            # Two quarter surfaces on top, one half-height full-width below.
            for i in range (0,2):
                self.screenparts.append(pygame.Surface(((self.screen.get_width()/2),(self.screen.get_height()/2))))
            self.screenparts.append(pygame.Surface((self.screen.get_width(),(self.screen.get_height()/2))))
        elif self.widgets_count == 4:
            for i in range (0,4):
                self.screenparts.append(pygame.Surface(((self.screen.get_width()/2),(self.screen.get_height()/2))))
        else:
            print ("ERROR: widgets_count is neither 1,2,3 nor 4.")
        #TODO: 2 players
        pass
UTF-8
Python
false
false
2,011
18,665,927,870,597
04efe86bf1fa6df4f07ef3f0fe387468191d1b28
4aa87bbc6e8869a0e96ad4cf09a977c078723a27
/localtv/templatetags/video_list.py
23f083538f8de0fd921290ebd1077b8be03421fc
[ "AGPL-3.0-only", "MIT", "GPL-1.0-or-later" ]
non_permissive
chrmorais/mirocommunity
https://github.com/chrmorais/mirocommunity
4db484543a0f7bf1f88ca2a9e5d8ac4875529230
051de162ee37bfae838febb79ef890cf29be4e88
refs/heads/master
2020-03-22T10:01:16.933154
2013-06-28T20:54:30
2013-06-28T21:41:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import template

from localtv.search.utils import NormalizedVideoList
from localtv.search.views import SortFilterMixin

register = template.Library()


class BaseVideoListNode(SortFilterMixin, template.Node):
    """
    Base helper class (abstract) for handling the get_video_list_* template
    tags. Based heavily on the template tags for django.contrib.comments.

    Syntax::

        {% get_video_list_FOO as <varname> %}
        {% get_video_list_for_FOO <foo_instance> as <varname> %}

    """
    # NOTE(review): `filter_name` and `sort` are presumably declared on
    # SortFilterMixin (not visible here) and overridden by the subclasses
    # below -- confirm against localtv.search.views.
    @classmethod
    def handle_token(cls, parser, token):
        """Class method to parse get_video_list_* and return a Node."""
        bits = token.split_contents()
        tag_name = bits[0]
        bits = bits[1:]
        # Filtered tags ({% get_video_list_for_FOO <obj> as <var> %}) take
        # one argument more than the unfiltered form ({% ... as <var> %}).
        argument_count = int(cls.filter_name is not None) + 2
        if len(bits) != argument_count:
            raise template.TemplateSyntaxError(
                "%r tag requires %i arguments" % (tag_name, argument_count))
        item = None
        if argument_count == 3:
            # First argument is the instance to filter on (compiled so it can
            # be resolved lazily against the rendering context).
            item = parser.compile_filter(bits[0])
            bits = bits[1:]
        if bits[0] != 'as':
            raise template.TemplateSyntaxError(
                "%s argument in %r tag must be 'as'" % (
                    "Third" if argument_count == 3 else "Second", tag_name))
        return cls(item=item, as_varname=bits[1])

    def __init__(self, item=None, as_varname=None):
        # item: compiled filter expression for the filtered variants (or None)
        # as_varname: context variable name the video list is stored under
        self.item = item
        self.as_varname = as_varname

    def render(self, context):
        # Template nodes must return a string; the useful output here is the
        # list injected into the rendering context.
        context[self.as_varname] = self.get_video_list(context)
        return ''

    def get_video_list(self, context):
        # get_form() comes from SortFilterMixin; the search result is wrapped
        # so templates can treat it like a plain list of videos.
        form = self.get_form(filter_value=self.get_filter_value(context))
        return NormalizedVideoList(form.search())

    def get_filter_value(self, context):
        # Unfiltered tags have no instance argument to resolve.
        if self.filter_name is None:
            return None
        return self.item.resolve(context)


class NewVideoListNode(BaseVideoListNode):
    """
    Insert a list of new videos into the context.

    """
    # Inherits the default sort from the base/mixin.
    pass


class PopularVideoListNode(BaseVideoListNode):
    """
    Insert a list of popular videos into the context.

    """
    sort = 'popular'


class FeaturedVideoListNode(BaseVideoListNode):
    """
    Insert a list of featured videos into the context.

    """
    sort = 'featured'


class CategoryVideoListNode(BaseVideoListNode):
    """
    Insert a list of videos for the given category into the context.

    """
    filter_name = 'category'

    def get_filter_value(self, context):
        # The filter machinery expects a list of values, not a bare instance.
        return [self.item.resolve(context)]


class TagVideoListNode(BaseVideoListNode):
    """
    Insert a list of videos for the given tag into the context.

    """
    filter_name = 'tag'


class UserVideoListNode(BaseVideoListNode):
    """
    Insert a list of videos for the given user into the context.

    """
    filter_name = 'author'

    def get_filter_value(self, context):
        # Same list-wrapping as CategoryVideoListNode.
        return [self.item.resolve(context)]


# Register one tag per node class; the *_for_* variants take an instance.
register.tag('get_video_list_new', NewVideoListNode.handle_token)
register.tag('get_video_list_popular', PopularVideoListNode.handle_token)
register.tag('get_video_list_featured', FeaturedVideoListNode.handle_token)
register.tag('get_video_list_for_category', CategoryVideoListNode.handle_token)
register.tag('get_video_list_for_tag', TagVideoListNode.handle_token)
register.tag('get_video_list_for_user', UserVideoListNode.handle_token)
UTF-8
Python
false
false
2,013
13,288,628,831,268
4d30253f760ff8d231b574446b0acaf56a0c8652
0fa915b833f6d1aa06f812111dd0c50115079afd
/models/__init__.py
989914c54ee6f41f93cc7c9516e02158161876df
[]
no_license
wodesuck/mstcweb
https://github.com/wodesuck/mstcweb
4327071e0bfc1a95c3f12b7a79622cb74826a9e1
d510d41ca401c68c3aab3f416bdd615c149359cc
refs/heads/master
2020-04-09T16:41:01.872014
2013-12-08T10:50:28
2013-12-08T10:50:28
11,688,628
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import form import page
UTF-8
Python
false
false
2,013
4,088,808,908,128
4b2031f2ac4f1dd26e78d15180b6431bde70317f
34395e6610d50036511d826da4971d26a138b273
/json-plugin/json_plugin.py
e71d90bf20c7f0c05e9711a0f9fae09fef0797c7
[ "MIT" ]
permissive
Sirtea/bottle-utils
https://github.com/Sirtea/bottle-utils
9179b696241466864c6e0fcc58f85aaf433e4aac
bbed52ecd7f5396fded4b60556a24d151e9fd654
refs/heads/master
2020-03-30T21:11:26.721101
2014-05-12T11:26:54
2014-05-12T11:26:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# getargspec was removed in Python 3.11; prefer the modern replacement and
# fall back for old interpreters.
try:
    from inspect import getfullargspec as _getargspec
except ImportError:  # Python 2 / very old Python 3
    from inspect import getargspec as _getargspec
from json import dumps, load

from bottle import request, response


class JsonPlugin(object):
    """Bottle plugin that JSON-decodes request bodies and JSON-encodes
    responses.

    Routes handled by this plugin must return a ``(body, status)`` tuple;
    ``body`` may be None for an empty response. If the route's signature
    declares the configured keyword, the decoded request body (or None on
    decode failure) is passed in under that name.
    """

    def __init__(self, keyword):
        # Name of the route argument that receives the decoded request body.
        self.keyword = keyword

    def apply(self, callback, context):
        # Inspect the callback once per route instead of on every request.
        wants_payload = self.keyword in _getargspec(callback)[0]

        def wrapper(*args, **kwargs):
            if wants_payload:
                try:
                    kwargs[self.keyword] = load(request.body)
                except Exception:
                    # Keep the original best-effort behaviour (malformed or
                    # absent JSON yields None) but no longer swallow
                    # SystemExit/KeyboardInterrupt via a bare except.
                    kwargs[self.keyword] = None
            body, status = callback(*args, **kwargs)
            response.content_type = 'application/json'
            response.status = status
            if body is not None:
                return dumps(body, separators=(',', ':'))
            return ''
        return wrapper
UTF-8
Python
false
false
2,014
13,511,967,118,251
df7a68b8fb0b79eefe6972cf4e947c4fc7f1aef3
daf9a0f4eb63b84d62b7628af53b4083e113e667
/test/test_SOV2_01_100.py
8f595ea9f0fdcaa390790bf664d60acd4d145ee6
[ "LicenseRef-scancode-cecill-c-en" ]
non_permissive
fayazr/libim7
https://github.com/fayazr/libim7
5121c76b834d752706c93e1ba0fea96237be5540
1bbd980070dc852a116cc4ff5bf426928edc275b
refs/heads/master
2021-01-21T02:59:41.472583
2011-04-28T14:55:04
2011-04-28T14:55:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2010 Fabricio Silva
"""
"""
# Compare the same PIV velocity field (SOV2_01_100) loaded through four
# different file formats: libim7 VC7, tab-separated txt, plain dat, and a
# pivmat .mat file.  With trace=True each component (vx/vy/vz) gets one
# figure with a 2x2 grid, one subplot per source format.
import sys
sys.path.append('..')  # so the locally built libim7 package is importable
import numpy as np

trace = True
# flags[i] enables one loader each: [VC7, txt, dat, mat].
flags = [True, True, True, True]

if trace:
    import matplotlib.pyplot as plt
    # Figure1 : vx, 2:vy, 3:vz
    f1 = plt.figure(1)
    f2 = plt.figure(2)
    f3 = plt.figure(3)
    f1.suptitle('$V_x$')
    f2.suptitle('$V_y$')
    f3.suptitle('$V_z$')
    Ax = [f.add_subplot(221) for f in (f1,f2,f3)]
    # Share both axes with the first subplot of figure `ind` (1-based).
    share = lambda ind: {'sharex': Ax[ind-1], 'sharey':Ax[ind-1]}

# Grid size of the velocity field and common imshow options.
nx, ny = 118, 78
d = {'interpolation':'nearest', 'vmin':-10, 'vmax':10, 'origin':'lower'}

# libim7
if flags[0]:
    import libim7 as im7
    buf1, att1 = im7.readim7('SOV2_01_100_davis.VC7')
    dx = {'extent':(buf1.x[0],buf1.x[-1],buf1.y[0],buf1.y[-1])}
    dx.update(d)
    # Storage: [index_x, index_y]
    # Need transpose to fit matplotlib image model.
    if trace:
        Ax[0].imshow(buf1.vx.T, **dx)
        Ax[1].imshow(buf1.vy.T, **dx)
        Ax[2].imshow(buf1.vz.T, **dx)

# txt: comma to dot
if flags[1]:
    f = file('SOV2_01_100_davis.txt', 'r')
    f.readline();  # skip the header line
    string = f.read();
    f.close();
    # The export uses decimal commas; convert before numeric parsing.
    string = string.replace(',', '.')
    buf2 = np.fromstring(string, sep='\t').reshape((ny,nx,5))
    # Storage: array[index_y, index_x, index_field]
    x, y = buf2[:,:,0][0,:], buf2[:,:,1][:,0]
    dx = {'extent':(x[0],x[-1],y[0],y[-1])}
    dx.update(d)
    if trace:
        f1.add_subplot(222, **share(1)).imshow(buf2[:,:,2], **dx)
        f2.add_subplot(222, **share(2)).imshow(buf2[:,:,3], **dx)
        f3.add_subplot(222, **share(3)).imshow(buf2[:,:,4], **dx)

# dat
if flags[2]:
    buf3 = np.loadtxt('SOV2_01_100_davis.dat', delimiter=' ', skiprows=3)
    buf3 = buf3.reshape((ny,nx,6))
    # Storage: array[index_y, index_x, index_field]
    x, y = buf3[:,:,0][0,:], buf3[:,:,1][:,0]
    dx = {'extent':(x[0],x[-1],y[0],y[-1])}
    dx.update(d)
    if trace:
        f1.add_subplot(223, **share(1)).imshow(buf3[:,:,3], **dx)
        f2.add_subplot(223, **share(2)).imshow(buf3[:,:,4], **dx)
        f3.add_subplot(223, **share(3)).imshow(buf3[:,:,5], **dx)

# mat
if flags[3]:
    import scipy.io as io
    buf4 = io.loadmat('SOV2_01_100_pivmat.mat',
        struct_as_record=False)['v'][0][0]
    # Troubles with incorrect x and y vectors... (pb in pivmat save).
    if trace:
        # Row flip (and vy sign flip) to match the other sources' orientation.
        f1.add_subplot(224).imshow(buf4.vx.T[::-1,:], **d)
        f2.add_subplot(224).imshow(-buf4.vy.T[::-1,:], **d)
        # vz not reliable (first tests on 3D pivmat)
        f3.add_subplot(224).imshow(buf4.vz.T[::-1,:], **d)

if trace:
    plt.show()
UTF-8
Python
false
false
2,011
1,176,821,057,056
b878511b51d00dfdb0c40566a96d2c021b924df5
ee14995b2d24a7095adf375a8b1132b86b159c7b
/setup.py
a8cc037fa345a69d6b7ab665e6c8422d4064c225
[]
no_license
teepark/fire
https://github.com/teepark/fire
dcad3c0885a6974a16aa03a24eaf5a87035b6f41
7d206c7e5017817491c8b0ff6ab07be785c9516f
refs/heads/master
2020-06-30T22:52:00.034807
2011-12-27T15:26:43
2011-12-27T15:26:43
3,055,127
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# vim: fileencoding=utf8:et:sta:ai:sw=4:ts=4:sts=4
"""Packaging script for the single-module `fire` distribution."""

from setuptools import setup

setup(
    name='fire',
    py_modules=['fire'],
    description='unix processes so easy a cave man could do it',
    author='Travis Parker',
    author_email='[email protected]',
    url='http://github.com/teepark/fire',
)
UTF-8
Python
false
false
2,011
14,044,543,066,183
dec1a325f4ae82d269b0b483b1bad7a5ef8f0c8a
f4a11e2c70271ba1b417dc7525d8a20f5f448869
/derivative2.py
51fe19000fb3f91d2be669223d8f5629832db3b6
[]
no_license
she11c0de/cute-plots
https://github.com/she11c0de/cute-plots
637e18bbf5203c9fa005deae303299c0976f8d97
05731c4fc629f7298b5497d4d617b5477c7c0461
refs/heads/master
2016-09-10T20:03:47.734057
2013-06-30T14:27:21
2013-06-30T14:27:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/bin/env python import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from numpy import arange, sin, cos, pi, ones, linspace, log fig = plt.figure() x = arange(0, 10*pi, .1) plt.plot(x, x**2*sin(x), x, 2*x*sin(x) + x**2*cos(x), x, [0] * len(x), 'r--') plt.xlabel('X') plt.ylabel('Y') plt.legend(["x^2sin(x)", "2xsin(x)+x^2cos(x)"]) plt.show()
UTF-8
Python
false
false
2,013
18,605,798,367,845
baefc577ca59d48acb17c6f96de20d4ebc8753c8
bdfe2a612eea882ff5ed3f8381c8415c60cc5e3b
/owa.py
c4aa8269e1e70a30810f509bf811c623c3ed1337
[ "WTFPL" ]
permissive
loqui/owarunner
https://github.com/loqui/owarunner
46e3a1a057a69b05d8acbf3c1286bb8cb420769c
2f13f899c329d2f7a096a31a0dab003180afeb82
refs/heads/master
2021-01-23T20:49:58.072615
2013-05-19T19:04:48
2013-05-19T19:04:48
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
from gi.repository import Gtk, WebKit, Gdk, GdkPixbuf
from urlparse import urlparse
import simplejson as json
import os, sys, urllib2, argparse


class App:
    """Open Web App runner: wraps a WebKit view in a GTK window, optionally
    loading the app's manifest for the window title and icons."""

    def __init__(self):
        parser = argparse.ArgumentParser(description="Open Web App Runner")
        parser.add_argument("url", help="URL for the App without protocol")
        parser.add_argument("murl", nargs="?", help="Relative route for the manifest file (usually something like '/appname.webapp')")
        parser.add_argument("--width", dest="width", default=900, help="Default window width")
        parser.add_argument("--height", dest="height", default=500, help="Default window width")
        parser.add_argument("--fullscreen", dest="fullscreen", action="store_true", help="Start fullscreened")
        # Bug fix: this option previously wrote into dest="fullscreen", so
        # passing --custom silently fullscreened the window instead.
        parser.add_argument("--custom", dest="custom", action="store_true", help="Set custom decoration")
        args = parser.parse_args()
        # Default to http:// when the user omitted the scheme.
        self.url = urlparse(args.url)
        if(self.url.scheme == ""):
            self.url = urlparse("http://" + args.url)
        self.murl = args.murl
        self.window = Gtk.Window()
        self.window.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
        self.window.set_default_size(int(args.width), int(args.height))
        self.window.set_resizable(True)
        if(args.fullscreen):
            self.window.fullscreen()
        self.view = WebKit.WebView()
        self.view.open(self.url.geturl())
        self.viewsettings = self.view.get_settings()
        self.viewsettings.set_property('enable-default-context-menu', False)
        self.viewsettings.set_property('enable-file-access-from-file-uris', True)
        self.view.connect('title-changed', self.title_changed)
        # Manifest is optional; when present it supplies title and icons.
        if(self.murl):
            self.manifest = self.appLoad(self.murl)
            if(self.manifest):
                self.window.set_title(self.manifest["name"] + " - " + self.manifest["description"])
                self.window.set_icon_list(self.iconsLoad())
        self.window.add(self.view)
        self.window.show_all()
        self.window.connect('destroy', lambda w: Gtk.main_quit())

    def appLoad(self, url):
        """Fetch and decode the JSON manifest at `url` (relative to the app
        host). Returns None on any failure (best effort)."""
        manifest = None
        try:
            # Bug fix: use the `url` parameter instead of ignoring it in
            # favour of self.murl (callers pass self.murl, so behaviour is
            # unchanged, but the signature is now honest).
            response = urllib2.urlopen(self.url.scheme + "://" + self.url.netloc + url)
            manifest = json.loads(response.read())
        except Exception:
            # Narrowed from a bare except so Ctrl-C still works.
            print("MANIFEST COULD NOT BE LOADED (" + self.url.scheme + "://" + self.url.netloc + url + ")")
        return manifest

    def iconsLoad(self):
        """Download every icon listed in the manifest and return them as
        GdkPixbuf images; failures are logged and skipped."""
        iconlist = []
        for icon in self.manifest["icons"].values():
            try:
                response = urllib2.urlopen(self.url.scheme + "://" + self.url.netloc + icon)
                content = response.read()
                fname = '/tmp/owaicon.png'
                # GdkPixbuf loads from a file, so stage the bytes on disk.
                with open(fname, 'w') as f:
                    f.write(content)
                image = GdkPixbuf.Pixbuf.new_from_file(fname)
                iconlist.append(image)
            except Exception:
                print("IMAGE COULD NOT BE LOADED (" + self.url.scheme + "://" + self.url.netloc + icon + ")")
        return iconlist

    def title_changed(self, widget, frame, title):
        # Keep the window title in sync with the page title.
        self.window.set_title(title)


def main():
    app = App()
    Gtk.main()


if __name__ == "__main__":
    main()
UTF-8
Python
false
false
2,013
11,716,670,787,049
3df4747129739bb495a16adaa3f188dfd8409dd7
f4cec24abc14b4ef7e60f40929d63dd1ed41b3ae
/setup.py
f36236a82a44f458779d63de8edb2d673ac29cc1
[]
no_license
denysbutenko/django-skeleton
https://github.com/denysbutenko/django-skeleton
9c7f9ace98406d0936d8f97b68fa814448f6375f
c9fd52527c5ecf6e50394482bad14204a4cd7305
refs/heads/master
2021-01-22T01:05:38.241468
2014-03-11T03:22:43
2014-03-11T03:22:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # # This script is designed to rename the default skeleton project name (myproject) and app (myfirstapp) to names you choose # import os, sys, string, shutil CURRENT_DIRECTORY = os.getcwd() CURRENT_PROJECT_NAME = 'myproject' CURRENT_APP_NAME = 'myfirstapp' GITHUB_CLONE_CMD = 'git clone https://github.com/mschettler/django-skeleton.git' # Enjoy the script? Tip some BTC! 18ZYJyeFAVUiGeWEZRu63NGwrr1ecz1eVR BAR100 = '-'*100 HASH100 = '#'*100 ################################################################################ # Helper functions ################################################################################ def get_user_feedback(): """ get user feedback, failing nicely if they kill it during input """ try: return raw_input().strip() except KeyboardInterrupt: print 'The script was killed. Goodbye!' sys.exit(0) def is_alphanumeric(instr): """ return false if instr contains anything other than letters or numbers """ allowed = string.letters + string.digits + '_' + '-' return not bool(set(instr) - set(allowed)) def validate_name(name): """ returns None on success, otherwise an error message is returned describing why the name failed to validate """ if ' ' in name: return 'Error: Name cannot contain spaces' if not is_alphanumeric(name): return 'Error: Name can only contain letters and numbers' if len(name) < 1: return 'Error: Name must have length' if name[0] in string.digits: return 'Error: Name cannot start with a digit' if name[0] in '_-': return 'Error: Name can only start with a letter, not an underscore or dash' # name passed validation return None def get_folder_list_for_directory(path, ignorehidden=True): """ returns a list of dirs found at the given path """ if not os.path.isdir(path): raise Exception('Invalid path "%s" passed to get_folder_list_for_directory(), does not appear to be a directory.') % path ret = [d for d in os.listdir(path) if os.path.isdir(d)] if ignorehidden: ret = [d for d in ret if not d.startswith('.')] return 
ret def rename(oldpath, newpath): """ similar to os.rename, performs some additional checks """ if not (oldpath and newpath): raise Exception('Failed to supply a paramter to rename()') if os.path.exists(newpath): raise Exception('Cannot rename, newpath [%s] already exists' % newpath) if not os.path.exists(oldpath): raise Exception('Cannot rename, oldpath [%s] does not exist' % old) # print '%s: Renamed %s to %s' % (__file__, oldpath, newpath) shutil.move(oldpath, newpath) def replace_text_infile(filepath, find, replace): if not (filepath and find): raise Exception('Failed to supply a parameter to replace_text_infile()') if not os.path.isfile(filepath): raise Exception('Cannot replace, filepath [%s] does not exists' % filepath) with open(filepath, 'r') as fh: fdata = fh.read() with open(filepath, 'w') as fh: # print '%s: replaced text "%s" in file "%s" with text "%s"' % (__file__, find, filepath, replace) fh.write(fdata.replace(find, replace)) def main(): ################################################################################ # Some sanity checks ################################################################################ # make sure we were executed from the proper directory if not (CURRENT_DIRECTORY.endswith('django-skeleton') or 'mschettler-django-skeleton-' in CURRENT_DIRECTORY): print 'Error: Script cannot continue' print ' * We seem to be in directory "%s", but we need to be in the django-skeleton/ directory. Please navigate there and run this script again.' % CURRENT_DIRECTORY print ' * If you need to re-download a fresh copy of this repo, please execute the command "%s"' % GITHUB_CLONE_CMD sys.exit(1) # lets see if we can find myproject if not CURRENT_PROJECT_NAME in get_folder_list_for_directory(CURRENT_DIRECTORY): print 'Error: Script cannot continue' print ' * We failed to find "%s" in directory "%s". This might have happened because you already renamed the project.' 
% (CURRENT_PROJECT_NAME, CURRENT_DIRECTORY) print ' * This script depends on an exact file structure to work properly.' print ' * If you need to re-download a fresh copy of this repo, please execute the command "%s"' % GITHUB_CLONE_CMD sys.exit(1) ################################################################################ # Main script routine - get a new name for the project ################################################################################ input_error = True print HASH100 print '#%sWelcome to django-skeleton!%s#' % (' '*35, ' '*36) print '#%sWe will ask you for a project and first app name. Enjoy!%s#' % (' '*21, ' '*21) while input_error: print HASH100 print 'Your current project name is "%s". Please enter a new outer project name (blank to skip): ' % CURRENT_PROJECT_NAME new_project_name = get_user_feedback() if new_project_name: input_error = validate_name(new_project_name) if input_error: print input_error continue # print 'You entered "%s" as your new project name.' % new_project_name else: print 'You skipped renaming your default project. The current name of "%s" still stands.' % CURRENT_PROJECT_NAME break ################################################################################ # Main script routine - get a new name for the app ################################################################################ input_error = True while input_error: print BAR100 print 'Your current default app name is "%s". Please enter a new app name (blank to skip): ' % CURRENT_APP_NAME new_app_name = get_user_feedback() if new_app_name: input_error = validate_name(new_app_name) if input_error: print input_error continue # print 'You entered "%s" as your new default app name.' % new_app_name else: print 'You skipped renaming your default app. The current name of "%s" still stands.' % CURRENT_APP_NAME break # print BAR100 if not new_app_name and not new_project_name: print 'You decided not to rename your project or app. This script has nothing to do. 
Goodbye!' sys.exit(0) if new_app_name == new_project_name: print 'You must select different names for your project and first application. This script cannot continue.' sys.exit(1) ################################################################################ # Main script routine - build a list of files that need to be renamed ################################################################################ # this is a list of tuples, with (orig_path, dest_path) # we cache all renames till the end, so we can print them out # and confirm them with the user rename_cache = [] # file string replace cache # contains tuples, (path_to_file, find_string, replace_string) file_string_replace = [] # walk the directory and perform renames for root, dirs, files in os.walk(CURRENT_DIRECTORY): # ignore git metadata if '.git' in root: continue if new_project_name and CURRENT_PROJECT_NAME in dirs+files: rename_cache.append((os.path.join(root, CURRENT_PROJECT_NAME), os.path.join(root, new_project_name))) if new_app_name and CURRENT_APP_NAME in dirs+files: rename_cache.append((os.path.join(root, CURRENT_APP_NAME), os.path.join(root, new_app_name))) # build a list of files that need to have strings replaced for f in files: fullpath = os.path.join(root, f) # ignore hidden files, this file, and non-python files if f.startswith('.') or f == __file__.lstrip('./') or not f.endswith('.py'): continue with open(fullpath, 'r') as fh: fdata = fh.read() if new_project_name and CURRENT_PROJECT_NAME in fdata: file_string_replace.append((fullpath, CURRENT_PROJECT_NAME, new_project_name)) if new_app_name and CURRENT_APP_NAME in fdata: file_string_replace.append((fullpath, CURRENT_APP_NAME, new_app_name)) if not rename_cache: print 'Error: Script cannot continue' print ' * We did not find any files to rename. 
This is considered an error' print ' * If you need to re-download a fresh copy of this repo, please execute the command "%s"' % GITHUB_CLONE_CMD sys.exit(1) # reverse this so execution renames the deepest directories first rename_cache.reverse() if not file_string_replace: print 'Error: Script cannot continue' print ' * We did not find any files in which to replace project/app text. This is considered an error' print ' * If you need to re-download a fresh copy of this repo, please execute the command "%s"' % GITHUB_CLONE_CMD sys.exit(1) ################################################################################ # Main script routine - tell user what we are about to do, and ask for feedback ################################################################################ # for trename in rename_cache: # print 'RENAME "%s" => "%s"' % trename # for treplace in file_string_replace: # print 'MODIFY FILE "%s" REPLACE "%s" WITH "%s"' % treplace # print 'We are about to rename %d files, and modify %d files. Please confirm that you would like to do this [y/n] (default y): ' % (len(rename_cache), len(file_string_replace)) # print 'We are about to modify %d files. Please confirm that you would like to do this [y/n] (default y): ' % (len(rename_cache) + len(file_string_replace)) # if 'n' in get_user_feedback().lower(): # print 'Okay, we won\'t do anything. Goodbye!' 
# sys.exit(0) ################################################################################ # Main script routine - actually do all processing ################################################################################ # print BAR100 # do text replace for treplace in file_string_replace: replace_text_infile(*treplace) # unpack tuple # do renames for trename in rename_cache: rename(*trename) # unpack tuple ################################################################################ # Final sanity checks ################################################################################ if new_project_name and CURRENT_PROJECT_NAME in get_folder_list_for_directory(CURRENT_DIRECTORY): print 'Error: Script hit fatal error' print ' * It seems "%s" still exists after we were supposed to rename it. Not sure what happened... bailing...' % CURRENT_PROJECT_NAME print ' * If you need to re-download a fresh copy of this repo, please execute the command "%s"' % GITHUB_CLONE_CMD sys.exit(1) ################################################################################ # Offer to remove git metadata, README, and this file ################################################################################ print BAR100 print 'If you would like, we can remove the git metadata, github README.md, and this setup.py script' print ' * This will leave you in a clean folder with your fresh django project inside' print ' * We strongly recommend you perform this automatic cleanup' print 'Would you like to do this? 
[y/n] (default y): ' if 'n' in get_user_feedback().lower(): print 'Okay, we won\'t remove anything' else: # remove metadata toremove = [os.path.join(CURRENT_DIRECTORY, 'setup.py'), os.path.join(CURRENT_DIRECTORY, 'README.md'), os.path.join(CURRENT_DIRECTORY, '.git'), os.path.join(CURRENT_DIRECTORY, '.gitignore'), os.path.join(CURRENT_DIRECTORY, 'screenshot.png')] for f in toremove: try: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) # print 'REMOVED "%s"' % f except: # print 'FAILED TO REMOVE "%s"' % f pass ################################################################################ # Done! ################################################################################ print '\nYour project is ready. Hope you enjoyed django-skeleton!\n' print ' Next steps:' print ' 1) cd %s/' % new_project_name print ' 2) python manage.py runserver' print ' 3) Navigate to http://127.0.0.1:8000/ to enjoy your starter template\n' print ' * Checkout my website http://mschettler.com or fork this repo at %s' % (GITHUB_CLONE_CMD.split(' ')[-1]) print ' * Feeling generous? I accept BTC, Thank you! 18ZYJyeFAVUiGeWEZRu63NGwrr1ecz1eVR\n' sys.exit(0) main()
UTF-8
Python
false
false
2,014
3,564,822,886,332
9e29997260079a7011090cc86364058779b14c8d
c540101d515e239d9fe6d038536734d0775ec27f
/src/resources/coverhelper.py
57c7fccb5525bdbc2e0e0fe1930561262262092d
[]
no_license
HDMU/Mediaportal-for-HDMU
https://github.com/HDMU/Mediaportal-for-HDMU
7ba3441444c0b9dab92aad1f3423e1ebb84a5015
99376ea3c94155ba6e8310533a1c84a5aac7f033
refs/heads/master
2016-10-15T02:43:02.983275
2014-04-14T21:57:06
2014-04-14T21:57:06
8,764,343
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from Plugins.Extensions.MediaPortal.resources.imports import * from twisted.web.client import downloadPage from enigma import gPixmapPtr, ePicLoad, eTimer from Components.AVSwitch import AVSwitch from Components.Pixmap import Pixmap from Tools.Directories import fileExists from Components.config import config import mp_globals from debuglog import printlog as printl class CoverHelper: COVER_PIC_PATH = "/tmp/Icon.jpg" NO_COVER_PIC_PATH = "/images/no_coverArt.png" def __init__(self, cover, callback=None, nc_callback=None): self._cover = cover self.picload = ePicLoad() self._no_picPath = "%s/skins/%s%s" % (mp_globals.pluginPath, config.mediaportal.skin.value, self.NO_COVER_PIC_PATH) self._callback = callback self._nc_callback = nc_callback self.coverTimerStart = eTimer() def getCoverTimerStart(self): self.coverTimerStart.startLongTimer(20) def getCover(self, url): self.getCoverTimerStart() print "getCover:", url if url: downloadPage(url, self.COVER_PIC_PATH).addCallback(self.showCover).addErrback(self.dataErrorP) else: self.showCoverNone() def dataErrorP(self, error): print "dataErrorP:" printl(error,self) self.showCoverNone() def showCover(self, picData): print "_showCover:" self.showCoverFile(self.COVER_PIC_PATH) def showCoverNone(self): print "_showCoverNone:" if self._nc_callback: self._cover.hide() self._nc_callback() else: self.showCoverFile(self._no_picPath) def showCoverFile(self, picPath): print "showCoverFile:" if fileExists(picPath): self._cover.instance.setPixmap(gPixmapPtr()) scale = AVSwitch().getFramebufferScale() size = self._cover.instance.size() self.picload.setPara((size.width(), size.height(), scale[0], scale[1], False, 1, "#FF000000")) if self.picload.startDecode(picPath, 0, 0, False) == 0: ptr = self.picload.getData() if ptr != None: self._cover.instance.setPixmap(ptr) self._cover.show() else: printl("Coverfile not found: %s" % picPath, self, "E") if self._callback: self._callback()
UTF-8
Python
false
false
2,014
10,256,381,928,953
8c1e03edd06a2be00c890b3a048f84d3d1aa251d
6e4ccd400900f8efa708bf8ff86315a5dfe24c91
/src/resbook/helpers.py
8fa962bab523baff881fc2a98b73e0ee1825277d
[]
no_license
fcanela/donatello
https://github.com/fcanela/donatello
a51756a0ca08d0e906f1cb8991f7da8f744e96ac
0d50aad713c471b4d7a659ee058ac601c0a74798
HEAD
2016-09-10T18:29:07.971728
2012-08-21T09:39:36
2012-08-21T09:39:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render_to_response
from django.core.context_processors import csrf


def craft_response(request, template, params=None):
    """Render ``template`` with ``params`` plus the CSRF token.

    Convenience wrapper around ``render_to_response`` that injects the
    CSRF context (required by ``{% csrf_token %}`` in the template).

    :param request: the current HttpRequest (source of the CSRF token).
    :param template: template name passed through to render_to_response.
    :param params: optional dict of template context; defaults to {}.
    :returns: the HttpResponse produced by render_to_response.
    """
    # `is None` (identity) is the correct check here; `== None` invokes
    # __eq__ and is non-idiomatic.
    if params is None:
        params = {}
    # NOTE: mutates a caller-supplied dict when params is passed in —
    # preserved because existing callers may rely on it.
    params.update(csrf(request))
    return render_to_response(template, params)
UTF-8
Python
false
false
2,012
15,496,242,035,540
5c2e42f532b1700a84e94dfaa72db50dd6f7a18a
3ed684cfd07ab85b7d6dffebfe15de17ae62bad7
/apps/games/boardgamegeek/boardgamegeek/items.py
749853481e664bf95a10ff7d06290be51b6519d7
[]
no_license
MihaZelnik/meeple.co
https://github.com/MihaZelnik/meeple.co
b31884eb5de7e7e908ba6c755fe596403e7543e3
15cf7b57a02a5076b1231cf320af0e15802b1d6b
refs/heads/master
2020-02-24T07:34:49.852537
2014-11-30T13:21:45
2014-11-30T13:21:45
25,835,092
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy.item import Field, Item class BoardgamegeekItem(Item): totalplays = Field() averagerating = Field() numviews = Field() playsthismonth = Field() userswanting = Field() userstrading = Field() numratings = Field() avggameweight = Field() fans = Field() rank = Field() usersowning = Field() yearpublished = Field() minplayers = Field() maxplayers = Field() playingtime = Field() age = Field() name = Field() thumbnail = Field() image = Field() boardgamepublisher = Field() boardgamedesigner = Field() boardgameartist = Field() boardgameexpansion = Field() boardgamecategory = Field() boardgamemechanic = Field() boardgamesubdomain = Field() boardgamefamily = Field() language_dependence = Field() url = Field() bgg_ranking = Field() objectid = Field() expansion = Field() pass
UTF-8
Python
false
false
2,014
7,834,020,363,079
113c2ac7a0ed4a311733764e1142a16c99afefd0
23aaa0182bc24cd52f9342b8198ec2b707d8f377
/toru-bot/source/riskrisk-torubot/Twitpic.py
6fc61be606956ad9cba9c096d95059d28474f90f
[]
no_license
risk/risk
https://github.com/risk/risk
0e60def2c620626e0ca779535a9700e92e5a6c67
ae4bdd512254e2f907cff3e411785dcabcf04733
refs/heads/master
2020-04-08T09:45:17.188322
2014-01-26T15:01:59
2014-01-26T15:01:59
4,062,972
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import urllib import datetime from xml.etree.ElementTree import * import Timezone import Bitly class Twitpic(): username = u'' tree = None rootElem = None twitpic = u'http://twitpic.com/' twitpicUsersShow = u'http://api.twitpic.com/2/users/show.xml' def __init__(self, name): self.username = name def xmlOpen(self, page=1): site = urllib.urlopen(u''.join([self.twitpicUsersShow,u'?',u'username=',self.username,u'&',u'page=',str(page)])) # XMLのパース self.tree = parse(site) self.rootElem = self.tree.getroot() def getUserPicList(self, now): # XMLを開く self.xmlOpen() # 戻り値 rtnList = [] # イメージのURLを取得する images = self.rootElem.find(u'./images') if images: # イメージ一覧を取得 imgList = images.findall(u'./image') if imgList: # 基準を前日の23時に以降に設定する checkDate = now - datetime.timedelta(days=1) checkDate = checkDate.replace(hour=23, minute=0, second=0, microsecond=0) done = False for i in range(1, 20): for elem in imgList: timeStamp = datetime.datetime.strptime( \ elem.find(u'./timestamp').text,'%Y-%m-%d %H:%M:%S') timeStamp = timeStamp.replace(tzinfo=Timezone.UTC()).astimezone(Timezone.JST()) # 基準範囲をチェック if(timeStamp < checkDate): # 取得範囲外になった場合は、終了 done = True break # リストに追加 #b = Bitly.Bitly(u''.join([self.twitpic, elem.find(u'./short_id').text])) rtnList.append(u''.join([self.twitpic, elem.find(u'./short_id').text])) if done: break # 次のページを開く self.xmlOpen(page=i) return rtnList def main(): tp = Twitpic(u'catcafenekorobi') tp.xmlOpen() lst = tp.getUserPicList(datetime.datetime.now(Timezone.JST())) for s in lst: print s pass if __name__ == '__main__': main()
UTF-8
Python
false
false
2,014
15,874,199,149,421
d6e3367c4ef19ffcbdf348e9cba5acb5095655fa
3404978caf496d4c64a09a4b86a0ffec248e1f3a
/test/old_tests/test_geocoder.py
c39e1b0938c347ae14c2f81022d7522fbd447657
[ "BSD-2-Clause" ]
permissive
kball/ambry
https://github.com/kball/ambry
bcc8e3550e8fad532beaabb6bda6d074b816a19a
ae865245128b92693d654fbdbb3efc9ef29e9745
refs/heads/master
2021-01-01T17:56:21.357529
2014-04-22T18:40:07
2014-04-22T18:40:07
19,428,611
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Created on Jan 17, 2013 @author: eric """ import unittest from testbundle.bundle import Bundle from ambry.run import RunConfig from test_base import TestBase import os class Test(TestBase): def setUp(self): self.copy_or_build_bundle() self.bundle = Bundle() def tearDown(self): pass def test_basic(self): from ambry.geo.geocoder import Geocoder g = Geocoder(self.bundle.library) filename = "good_segments" f_input = os.path.join(os.path.dirname(__file__), '../support',filename + '.txt') f_output = os.path.join(os.path.dirname(__file__), '../support',filename + '.out.csv') with open(f_input) as f: for line in f: addr = line.strip() r = g.geocode_address(addr) print "==", addr print "->",r if r: print " ", r['codedaddress'] def write_error_row(self, code, arg, p, w, address, city): try: ps = p.parse(address) except: ps = False if not ps: row = [code, arg, address, city] else: row = [code, arg, address, city, ps.number, ps.street_direction, ps.street_name, ps.street_type] w.writerow(row) def x_test_crime(self): from ambry.geo.address import Parser from ambry.geo.geocoder import Geocoder import csv g = Geocoder(self.bundle.library, addresses_ds='geoaddresses') _,incidents = self.bundle.library.dep('crime') log_rate = self.bundle.init_log_rate(1000) p = Parser() with open(self.bundle.filesystem.path('errors.csv'), 'wb') as f: writer = csv.writer(f) writer.writerow(['code','arg','block_address','city','number','dir','street','type']) multi_cities = 0.0 multi_addr = 0.0 no_response = 0.0 for i, inct in enumerate(incidents.query("SELECT * FROM incidents limit 100000")): row = dict(inct) candidates = g.geocode_semiblock(row['blockaddress'], row['city'], 'CA') if len(candidates) == 0: no_response += 1 self.write_error_row('norsp',0, p,writer,row['blockaddress'], row['city']) continue elif len(candidates) != 1: multi_cities += 1 self.write_error_row('mcities',len(candidates), p,writer,row['blockaddress'], row['city']) continue s = candidates.popitem()[1] if len(s) > 3: 
self.write_error_row('maddr',len(s), p,writer,row['blockaddress'], row['city']) multi_addr +=1 if i > 0: log_rate("{} cities={}, {}% addr={}, {}% nrp={}, {}%".format(i, multi_cities, int(multi_cities/i * 100), multi_addr, int(multi_addr/i * 100), no_response, int(no_response/i * 100) )) def test_place_coder(self): from ambry.geo.geocoder import PlaceCoder pc = PlaceCoder(self.bundle.library) places = self.bundle.library.dep('places').partition for place in places.rows: try: in_places = [ x['name'] for x in pc.lookup_wgs(place['lat'], place['lon'])] except ValueError: continue # Some of the centroids aren't in the regions, since there are complicated region # shapes, and some cities hold parcels in the east county. if not place['name'] in in_places: print place['type'], place['name'], in_places if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
UTF-8
Python
false
false
2,014
8,529,805,072,641
74b2cbdbe6661adbef68043c4492b4109094e012
7da41c1943a69f687ac1f121edc2c9f70cd93110
/probability.py
4bbc59434eb6fc812a5c25b0905a5a0012f5645e
[]
no_license
yuqiaoyan/Python
https://github.com/yuqiaoyan/Python
e636f699366d685dd271c61d24f16aa4a8b79547
2f43dfeb2a517b77d0f6ba4c3883366ea4b1a2a9
refs/heads/master
2021-01-19T18:29:45.711613
2012-10-28T16:06:33
2012-10-28T16:06:33
3,541,184
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import random import sys from numpy import* from math import* import matplotlib.pyplot as plt #Each time you collect a coupon, you are equally likely to get type N coupon #What is the expected # of coupons you have to collect, so you can #complete a set of coupons? def getNumCoupons(numType): #give numType coupons, this function will return the number of #coupons you have to collect numTypeList = [i for i in range(1,numType)] numObs = 0 #once we remove all the items from the list we've completed the set while(numTypeList): obs = random.randint(1,numType) numObs += 1 if obs in numTypeList: try: numTypeList.remove(obs) except: print "Issue with random generator and numTypeList" return numObs def simExperiments(num, numType): #simulate num Experiments and save the collection of results in a list #return the list of results result = [] #run the experiment num times for i in range(0,num): numCoupons = getNumCoupons(numType) #define numType of coupons in cmd line result.append(numCoupons) print "Coupons is: " + str(numCoupons) return numCoupons def drawHist(aHistogram): fig = plt.figure() ax = fig.add_subplot(111) ax.hist(aHistogram,normed=1,facecolor='green',alpha=0.75) plt.show() if __name__ == '__main__': #cmd line: "python probability.py numExperiments numCouponType" # #python probability.py 1000 7" types = int(sys.argv[2]) resultList = simExperiments(int(sys.argv[1]),types) resultArray = array(resultList) print "The expected number of coupons for " + sys.argv[1] + " number of experiments is: " print str(mean(resultArray)) print "The expected number based on GEOMETRIC DISTRIBUTION is: " print log(types)*types print "The expected number based on BINOMIAL DISTRIBUTION is: " print types*(1.0/types)*( pow((double(types - 1)/types),types-1))*types #drawHist(resultArray) drawHist(histogram(resultArray, bins = 10))
UTF-8
Python
false
false
2,012
7,730,941,174,823
374e5ef08c32c331ee421506bd307e512d36f3d9
d439c5c32c1c73b184328d0034c972b0c8bccb4b
/Project Euler/Problem2.py
16df1fd35f4514cdb148d78a94709efd85066215
[]
no_license
Hunter275/Python-Stuff
https://github.com/Hunter275/Python-Stuff
07014154d44ef6b21910dde2c82b2b02d1ec82d9
79130d7da6c2d5c4414f98ad71f61173b9719ea7
refs/heads/master
2021-01-25T07:28:45.218721
2014-11-01T18:30:45
2014-11-01T18:30:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Project Euler, Problem 2: sum of the even-valued Fibonacci terms
# whose value stays below four million.
numsum = 0
prev, curr = 0, 1
while curr < 4000000:
    # Every third Fibonacci number is even; accumulate just those.
    if curr % 2 == 0:
        numsum += curr
    prev, curr = curr, prev + curr
print(numsum)
UTF-8
Python
false
false
2,014
438,086,677,049
f283b070e400b56ed455237946dbfe1f23a60a85
216a2c06bad45fccb1ab40aedc7bda55d4d468ab
/test_sg_create.py
2f65446eb27a08a37e646601b8a3e25c49446c53
[]
no_license
yosef-cloud/tarkin
https://github.com/yosef-cloud/tarkin
60391703197bf990ddb530b36736eb8dbc5674f2
9241439e898fa33540c5241c2068498d86393f4a
refs/heads/master
2021-01-16T20:51:30.697025
2012-08-03T22:57:05
2012-08-03T22:57:40
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import re
import time

from lib.sgtest import SGTest


class SGCreate(SGTest):
    """Timed test: create a security group, verify it is listed, delete it."""

    def main(self):
        # Random suffix keeps concurrent runs from colliding on the name.
        tmp_group = 'tmp_' + self.rndstr(6)
        # FIX: corrected typo in the log message ("creatin" -> "creating").
        self.log('creating group: %s ' % tmp_group)
        # Time only the create call itself.
        sw = self.get_stopwatch()
        self.create_group(tmp_group)
        ellapsed_time = sw.stop()
        group_names = [x.name for x in self.list_sg_groups()]
        self.log("list of groups: %s" % str(group_names) )
        # Pass iff the freshly created group shows up in the listing.
        _result = tmp_group in group_names
        # NOTE: "ellapsed_time" spelling is the add_result keyword defined by
        # the SGTest base class, so it is kept as-is.
        self.add_result(test_name=self.__class__.__name__,
                        result=_result,
                        ellapsed_time=ellapsed_time)
        # remove the group (brief pause so the backend settles before delete)
        time.sleep(1)
        self.delete_group(tmp_group)


if __name__ == '__main__':
    o = SGCreate()
    o.main()
    o.emit_results()
UTF-8
Python
false
false
2,012
3,650,722,245,415
de6e2ee482db62aa8a2617794602828f41357f94
6c1a1657c8e75f2d9265ec2d3e7fe843e858eeaa
/Web app/app/sentimentAnalysis.py
57da96c3cc504f65fe1608d6365a9c38cabefd75
[]
no_license
Zeliax/DataMiningUsingPython
https://github.com/Zeliax/DataMiningUsingPython
c5f94422650690fa6a1f2268f4986d6de4badd64
2759316347298525a937a64bff9c08554f3a3579
refs/heads/master
2021-01-17T06:24:43.579860
2014-12-08T10:04:55
2014-12-08T10:04:55
23,545,979
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Contains the functions for making sentiment analysis.""" from detect_lang import LanguageDetector from nltk.tokenize import RegexpTokenizer import numpy as np import codecs import os def wordlist_to_dict(): """Create a dictionary from a wordlist.""" path = os.getcwd() # Runs from web app folder word_list = codecs.open(path + "\\app\\FINN-wordlist.txt", "r", encoding='utf8') def parse_line(line): word, sentiment = line.split('\t') return word, int(sentiment) word_dict = dict([parse_line(line) for line in word_list]) word_list.close() return word_dict def sentiment(words, word_dict): """Calculalte the sentiment score. Calculates the sentiment score for each word from a tokenized sentence and stores them in a list. """ sent_values = [word_dict[word] for word in words if word in word_dict] if not sent_values: sent_values = -1 return sent_values def sentiment_analysis(commentlist, wordlist): """Calculate the mean sentiment of each comment. Keyword arguments: commentlist -- a list of lists of comments """ # total_sentiment = 0 tokenizer = RegexpTokenizer(r'[a-z]+') all_sentiment = [] all_unknown = [] ld = LanguageDetector() for video in commentlist: video_sentiment = [] neutral = [] for comment in video: if ((ld.get_language(comment) == 'english') and (type(comment) is str)): comment = comment.lower() comment = " ".join([word for word in comment.split() if "http" not in word]) words = tokenizer.tokenize(comment) sentiment_score = sentiment(words, wordlist) if sentiment_score == -1: neutral.append(sentiment_score) else: # video_sentiment is a list of sentiments for each video. video_sentiment.append(np.mean(sentiment_score)) # all_sentiment is a list of sentiment scores for all the videos. all_unknown.append(neutral) all_sentiment.append(video_sentiment) return all_sentiment, all_unknown
UTF-8
Python
false
false
2,014
14,285,061,231,589
646252e6555c9cfd88163543584ff475342cdbe7
2a696e43a5f136d8a037f8d8409d0dae170fb1e4
/blenderProject/Blender_anton_junk/test_materials.py
01726e429e25d7d5df0fc0e234eb127f89ee7f39
[]
no_license
Redoxee/blender_python
https://github.com/Redoxee/blender_python
825fbefa8fde6f60f6df79667cc6f8c871351a06
2e87e3833f7191808d788ad2c32489442fcc96f9
refs/heads/master
2020-03-30T08:11:20.576035
2014-12-07T15:47:17
2014-12-07T15:47:17
9,346,146
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Copyright (c) 2013 Anton Roy

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''

bl_info = {
    "name": "Katana City Generator",
    "author": "Anton Roy, Antoin Berry",
    "version": (0, 9),
    "blender": (2, 66, 0),
    "location": "View3D > Tool",
    "description": "Create Cities",
    "warning": "Heavy scripts",
    "wiki_url": "",
    "tracker_url": "",
    "category": "Add Mesh"}

from collections import namedtuple
from mathutils import Vector
import bpy
from random import random, seed
#import parser
import math
from bpy.props import *
from bpy.app.handlers import persistent
import os

###############################################################################
#
###############################################################################

# A polygon side: two endpoints (Vectors).
segment = namedtuple('segment', 'p1,p2')

###############################################################################
#
###############################################################################


def point_in_poly(x, y, poly):
    """Ray-casting point-in-polygon test in the XY plane (Z is ignored)."""
    n = len(poly)
    inside = False
    p1x, p1y, p1z = poly[0]
    for i in range(n + 1):
        p2x, p2y, p2z = poly[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        # X of the edge/ray intersection.
                        xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xints:
                            inside = not inside
        p1x, p1y = p2x, p2y
    return inside


def average_position(polygone):
    """Arithmetic mean of the polygon's vertices ((0,0,0) when empty)."""
    vert = Vector((0, 0, 0))
    if len(polygone) == 0:
        return vert
    for v in polygone:
        vert = vert + v
    return vert / len(polygone)


def barycentre_weighted(p1, m1, p2, m2):
    """ point p1 with mass m1, p2 with mass m2 """
    return Vector((float(m1 * p1.x + m2 * p2.x) / (m1 + m2),
                   float(m1 * p1.y + m2 * p2.y) / (m1 + m2),
                   float(m1 * p1.z + m2 * p2.z) / (m1 + m2)))


def barycentre(list):
    """ Calcul the barycentre of n points, argument : list of couples (p, m),
    p a point (Vector), m its mass """
    if len(list) == 0:
        return Vector((0, 0, 0))
    if len(list) == 1:
        return list[0][0]
    sum_x, sum_y, sum_z = 0, 0, 0
    sum_mass_x, sum_mass_y, sum_mass_z = 0, 0, 0
    for i in range(len(list)):
        p, m = list[i][0], list[i][1]
        sum_x += p.x
        sum_y += p.y
        sum_z += p.z
        sum_mass_x += m
        sum_mass_y += m
        sum_mass_z += m
    return Vector((float(sum_x) / sum_mass_x,
                   float(sum_y) / (sum_mass_y),
                   float(sum_z) / (sum_mass_z)))


def area(p):
    """Absolute area of polygon p in the XY plane (shoelace formula)."""
    # FIX (cosmetic, behavior-identical): the second unpack previously reused
    # the name z0, shadowing the first; the Z components are unused either way.
    return 0.5 * abs(sum(x0 * y1 - x1 * y0
                         for ((x0, y0, z0), (x1, y1, z1)) in segments(p)))


def segments(p):
    """Pairs of consecutive vertices (closing edge included)."""
    return zip(p, p[1:] + [p[0]])


def generate_polygon(center, radius, n):
    """ Generates a regular polygon with n sides, within the circle
    (center, radius) """
    polygon = []
    for i in range(n):
        alpha = 2 * math.pi * i / n
        polygon.append(Vector(((center.x + math.cos(alpha) * radius),
                               (center.y + math.sin(alpha) * radius),
                               center.z)))
    return polygon


def get_polygone_orientation(polygone=[]):
    """Signed doubled area; sign gives winding (CCW positive)."""
    res = 0
    for i in range(len(polygone)):
        x1, y1, z1 = polygone[i]
        x2, y2, z2 = polygone[(i + 1) % len(polygone)]
        res += (x1 * y2 - x2 * y1)
    return res


def resize_polygone_from_center(polygone=[], factor=1):
    """Scale the polygon about its average position by `factor`."""
    average = average_position(polygone)
    tempPol = [((v - average) * factor) + average for v in polygone]
    return tempPol


def get_random_point_in_bounds(polygone=[]):
    """Uniform random point inside the polygon (rejection sampling on its
    axis-aligned bounding box)."""
    if(len(polygone) == 0):
        return Vector((0, 0, 0))
    xmin, ymin, zmin = polygone[0]
    xmax, ymax, zmax = polygone[0]
    for v in polygone:
        xmin = v.x if v.x < xmin else xmin
        ymin = v.y if v.y < ymin else ymin
        zmin = v.z if v.z < zmin else zmin
        xmax = v.x if v.x > xmax else xmax
        ymax = v.y if v.y > ymax else ymax
        zmax = v.z if v.z > zmax else zmax
    candidat = Vector((random() * (xmax - xmin) + xmin,
                       random() * (ymax - ymin) + ymin,
                       random() * (zmax - zmin) + zmin))
    while not point_in_poly(candidat.x, candidat.y, polygone):
        candidat = Vector((random() * (xmax - xmin) + xmin,
                           random() * (ymax - ymin) + ymin,
                           random() * (zmax - zmin) + zmin))
    return candidat


def arrange_triangle(polygon):
    """Turn a triangle into a quad by cutting near one corner (ratio 1:7)."""
    #return if the polygon is not a triangle
    if len(polygon) != 3:
        return;
    #the 3 points of the triangle
    a, b, c = polygon[0], polygon[1], polygon[2]
    #vectors of the 3 edges :
    ab, bc, ca = Vector((b.x - a.x, b.y - a.y, b.z - a.z)), \
                 Vector((c.x - b.x, c.y - b.y, c.z - b.z)), \
                 Vector((a.x - c.x, a.y - c.y, a.z - c.z))
    abc, bca, cab = angle_between(ab, bc), angle_between(bc, ca), angle_between(ca, ab)
    # NOTE(review): the variable is named min_angle but is assigned max(...);
    # left unchanged because downstream geometry may depend on this choice —
    # confirm the intended corner before "fixing".
    min_angle = max(abc, bca, cab)
    returnPolygon = []
    cut = (1, 7)
    if abc == min_angle:
        returnPolygon = [a, barycentre_weighted(a, cut[0], b, cut[1]),
                         barycentre_weighted(c, cut[0], b, cut[1]), c]
    elif bca == min_angle:
        returnPolygon = [b, barycentre_weighted(b, cut[0], c, cut[1]),
                         barycentre_weighted(a, cut[0], c, cut[1]), a]
    else:
        returnPolygon = [c, barycentre_weighted(c, cut[0], a, cut[1]),
                         barycentre_weighted(b, cut[0], a, cut[1]), b]
    return returnPolygon


def angle_between(v1, v2):
    """Angle (radians) between two vectors."""
    return math.acos(float(dot_product(v1, v2)) / (v1.length * v2.length))


def dot_product(v1, v2):
    return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z


def get_edes_for_poly(polygone):
    """Edge index pairs (i, i+1) closing back to vertex 0."""
    nb_verts = len(polygone)
    edges = []
    for i in range(nb_verts):
        edges.append((i, (i + 1) % nb_verts))
    return edges


def center(polygon):
    """Unweighted barycentre of the polygon's vertices."""
    return barycentre(list(map(lambda p: (p, 1), polygon)))

###############################################################################
#
###############################################################################


def split_polygone_by_side(polygone=[], indice_1=0, indice_2=0):
    """Split a polygon in two by joining the midpoints of sides indice_1 and
    indice_2. Returns [polygone] unchanged for degenerate input."""
    if len(polygone) < 3 or indice_1 == indice_2:
        return [polygone]
    side1 = segment(polygone[indice_1 % len(polygone)], polygone[(indice_1 + 1) % len(polygone)])
    side2 = segment(polygone[indice_2 % len(polygone)], polygone[(indice_2 + 1) % len(polygone)])
    # Where the polygon side gets cut; 0.5 is the midpoint.
    coupure_segment = 0.5
    nx = side1.p1.x + coupure_segment * (side1.p2.x - side1.p1.x)
    ny = side1.p1.y + coupure_segment * (side1.p2.y - side1.p1.y)
    nz = side1.p1.z + coupure_segment * (side1.p2.z - side1.p1.z)
    np1 = Vector((nx, ny, nz))
    nx = side2.p1.x + coupure_segment * (side2.p2.x - side2.p1.x)
    ny = side2.p1.y + coupure_segment * (side2.p2.y - side2.p1.y)
    nz = side2.p1.z + coupure_segment * (side2.p2.z - side2.p1.z)
    np2 = Vector((nx, ny, nz))
    # Build sub-polygon 1: walk from the end of the first cut side until we
    # reach the end of the second cut side.
    sous_poly_1 = [np1]
    indice_d_arret = (indice_2 + 1) % len(polygone)
    indice = (indice_1 + 1) % len(polygone)
    while not indice == indice_d_arret:
        sous_poly_1.append(polygone[indice])
        indice = (indice + 1) % len(polygone)
    sous_poly_1.append(np2)
    # Build sub-polygon 2: the complementary walk.
    sous_poly_2 = [np2]
    indice_d_arret = (indice_1 + 1) % len(polygone)
    indice = (indice_2 + 1) % len(polygone)
    while not indice == indice_d_arret:
        sous_poly_2.append(polygone[indice])
        indice = (indice + 1) % len(polygone)
    sous_poly_2.append(np1)
    return [sous_poly_1, sous_poly_2]


def split_polygone(polygone=[]):
    """Split a polygon in two across the midpoints of its two longest sides.
    Returns [] for polygons with fewer than 3 vertices."""
    if len(polygone) < 3:
        return []
    # Rank sides by squared XY length, longest first.
    list_indice_length = []
    for i in range(len(polygone)):
        v1 = polygone[i]
        v2 = polygone[(i + 1) % len(polygone)]
        length = ((v1.x - v2.x) * (v1.x - v2.x)) + ((v1.y - v2.y) * (v1.y - v2.y))
        list_indice_length.append((i, length))
    list_indice_length.sort(key=lambda side: -side[1])
    side1 = segment(polygone[list_indice_length[0][0]],
                    polygone[(list_indice_length[0][0] + 1) % len(polygone)])
    # Tie-break so equal-length second/third sides pick a stable index.
    if list_indice_length[1][1] == list_indice_length[2][1]:
        list_indice_length[1] = (list_indice_length[2][0], list_indice_length[1][1])
    side2 = segment(polygone[list_indice_length[1][0]],
                    polygone[(list_indice_length[1][0] + 1) % len(polygone)])
    # Where the polygon side gets cut; 0.5 is the midpoint.
    coupure_segment = 0.5
    nx = side1.p1.x + coupure_segment * (side1.p2.x - side1.p1.x)
    ny = side1.p1.y + coupure_segment * (side1.p2.y - side1.p1.y)
    nz = side1.p1.z + coupure_segment * (side1.p2.z - side1.p1.z)
    np1 = Vector((nx, ny, nz))
    nx = side2.p1.x + coupure_segment * (side2.p2.x - side2.p1.x)
    ny = side2.p1.y + coupure_segment * (side2.p2.y - side2.p1.y)
    nz = side2.p1.z + coupure_segment * (side2.p2.z - side2.p1.z)
    np2 = Vector((nx, ny, nz))
    # Build sub-polygon 1: from the end of the first cut side to the end of
    # the second cut side.
    sous_poly_1 = [np1]
    indice_d_arret = (list_indice_length[1][0] + 1) % len(polygone)
    indice = (list_indice_length[0][0] + 1) % len(polygone)
    while not indice == indice_d_arret:
        sous_poly_1.append(polygone[indice])
        indice = (indice + 1) % len(polygone)
    sous_poly_1.append(np2)
    # Build sub-polygon 2: the complementary walk.
    sous_poly_2 = [np2]
    indice_d_arret = (list_indice_length[0][0] + 1) % len(polygone)
    indice = (list_indice_length[1][0] + 1) % len(polygone)
    while not indice == indice_d_arret:
        sous_poly_2.append(polygone[indice])
        indice = (indice + 1) % len(polygone)
    sous_poly_2.append(np1)
    return(sous_poly_1, sous_poly_2)


def split_polygon_from_center(polygon, ratio=(1, 1)):
    """Split a polygon into n edge quads plus a central scaled-down copy."""
    n_points = len(polygon)
    if n_points < 3:
        # BUG FIX: previously returned the undefined name `polygone`, which
        # raised NameError for degenerate input; the parameter is `polygon`.
        return polygon
    c = center(polygon)
    # Inner ring: each vertex pulled toward the centre by `ratio`.
    center_polygon = []
    for i in range(n_points):
        center_polygon.append(barycentre([(c, ratio[0]), (polygon[i], ratio[1])]))
    out_polygons = []
    for i in range(n_points):
        next_i = i + 1
        if next_i == n_points:
            next_i = 0
        out_polygons.append([polygon[i], polygon[next_i],
                             center_polygon[next_i], center_polygon[i]])
    out_polygons.append(center_polygon)
    return out_polygons


def split_evenly(polygone=[]):
    """Find the side pair whose midpoint cut yields the most equal-area
    halves, then split there. Returns [] when no valid cut exists."""
    if len(polygone) < 3:
        return []
    min_area = -1
    indice1 = 0
    indice2 = 0
    nb_point = len(polygone)
    # Exhaustive O(n^2) search over side pairs; minimise squared area gap.
    for i1 in range(nb_point - 1):
        for i2 in range(nb_point)[i1 + 1:nb_point]:
            sp1, sp2 = split_polygone_by_side(polygone, i1, i2)
            a1 = area(sp1)
            a2 = area(sp2)
            tres = (a1 - a2) * (a1 - a2)
            print(str(tres))
            if(tres < min_area or min_area < 0):
                min_area = tres
                indice1 = i1
                indice2 = i2
    print("indice1 " + str(indice1) + " indice2 " + str(indice2))
    if indice1 == indice2:
        return []
    return split_polygone_by_side(polygone, indice1, indice2)
############################################################################################################### # # ############################################################################################################### def subdivide_until_area(polygone = [],min_area = 5): if area(polygone) < min_area : return [polygone] else : pol1,pol2 = split_polygone(polygone) return subdivide_until_area(pol1,min_area) + subdivide_until_area(pol2,min_area) ############################################################################################################### # # ############################################################################################################### def add_obj(obdata, context): scene = context.scene obj_new = bpy.data.objects.new(obdata.name, obdata) base = scene.objects.link(obj_new) return obj_new,base def select_obj(obj,base,mesh): bpy.context.scene.objects[mesh.name].select = True base.select = True bpy.context.scene.objects.active = obj def deselect_obj(base,mesh): base.select = False bpy.context.scene.objects[mesh.name].select = False bpy.context.scene.objects.active = None ############################################################################################################### # # ############################################################################################################### def get_poly_from_object(obj): res = [] data = obj.data location = obj.location if(not len(data.vertices) == len(data.edges)): return res for v in data.vertices: res.append(Vector(v.co + location)) return res def decoupe_selection_using_split_operator(area_min,split_function): poly_to_draw = [] for obj in bpy.context.selected_objects : poly = get_poly_from_object(obj) if area(poly) > area_min: poly_to_draw.append((split_function(poly),obj.name)) else: obj.select = False bpy.ops.object.delete(use_global=False) elem_added = [] for poly_couple in poly_to_draw: for poly in poly_couple[0]: o,b =dessine_polygone(poly,poly_couple[1]) 
elem_added.append((o,b)) for o,b in elem_added: bpy.context.scene.objects[o.name].select = True b.select = True def decoupe_selection(area_min): decoupe_selection_using_split_operator(area_min,split_polygone) def decoupe_selection_evenly(area_min): decoupe_selection_using_split_operator(area_min,split_evenly) def decoupe_selection_from_center(area_min): decoupe_selection_using_split_operator(area_min,split_polygon_from_center) ############################################################################################################### # # ############################################################################################################### def dessine_polygone(polygone,name): nb_verts = len(polygone) edges =[] for i in range(nb_verts): edges.append((i,(i+1) % nb_verts)) mesh = bpy.data.meshes.new(name) mesh.from_pydata(polygone, edges, []) mesh.update() obj,base = add_obj(mesh, bpy.context) centerposition = average_position(polygone) select_obj(obj,base,mesh) bpy.context.scene.cursor_location = centerposition bpy.ops.object.origin_set(type='ORIGIN_CURSOR') deselect_obj(base,mesh) return obj,base def dessine_simple_batiment(polygone =[], hauteur = 10, shrink = 0.7, name = 'poly'): edges = get_edes_for_poly(polygone) mesh = bpy.data.meshes.new(name) mesh.from_pydata(polygone, edges, []) mesh.update() obj,base = add_obj(mesh, bpy.context) centerposition = average_position(polygone) select_obj(obj,base,mesh) bpy.context.scene.cursor_location = centerposition bpy.ops.object.origin_set(type='ORIGIN_CURSOR') bpy.ops.object.mode_set(mode='EDIT') bpy.ops.transform.resize(value=(shrink, shrink, shrink)) bpy.ops.mesh.edge_face_add() bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=(0,0,hauteur)) bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.normals_make_consistent(inside=False) bpy.ops.object.mode_set(mode='OBJECT') deselect_obj(base,mesh) return obj,base def dessine_batiment(hauteur_etage = 2,hauteur_inter_etage = 
1,profondeur=0.8,nb_etage = 10,toit = 1): listIndiceFaces = [0] obj = bpy.context.selected_objects[0] etage = Vector((0,0,hauteur_etage)) inter = Vector((0,0,hauteur_inter_etage)) shrink = (profondeur,profondeur,profondeur) expande = (1 / profondeur,1 / profondeur,1 / profondeur) bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.edge_face_add() bpy.ops.transform.resize(value=shrink) for i in range(nb_etage - 1 ): bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=etage ) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.resize(value=expande) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.resize(value=shrink) bpy.ops.object.mode_set(mode='OBJECT') obj.data.update(calc_tessface=True) listIndiceFaces.append(len(obj.data.tessfaces)-1) bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter) bpy.ops.transform.resize(value=(toit,toit,toit)) bpy.ops.mesh.extrude_region_move() bpy.ops.mesh.merge(type='CENTER', uvs=False) bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.normals_make_consistent(inside=False) bpy.ops.object.mode_set(mode='OBJECT') obj.data.update(calc_tessface=True) listIndiceFaces.append(len(obj.data.tessfaces)-1) print(str(listIndiceFaces)) # Create three materials red = bpy.data.materials.new('Red') red.diffuse_color = (1,0,0) blue = bpy.data.materials.new('Blue') blue.diffuse_color = (0,0,1) obj.data.materials.append(red) obj.data.materials.append(blue) j=0 for i in range(len(listIndiceFaces) - 1): j = i+1 indice_debut_etage = listIndiceFaces[i] indice_fin_etage = listIndiceFaces[j] for indFace in range(indice_debut_etage,indice_fin_etage ): obj.data.tessfaces[indFace].material_index = i%2 def dessine_maison(hauteur_etage = 2,hauteur_inter_etage = 1,profondeur=0.8,nb_etage = 10,toit = 1): etage = Vector((0,0,hauteur_etage)) inter = Vector((0,0,hauteur_inter_etage)) shrink = 
(profondeur,profondeur,profondeur) expande = (1 / profondeur,1 / profondeur,1 / profondeur) bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.edge_face_add() bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter * 0.4) bpy.ops.transform.resize(value=(0.9,0.9,0.9)) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.resize(value=(0.9,0.9,0.9)) for i in range(nb_etage - 1 ): bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter ) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.resize(value=shrink) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=etage) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.resize(value=expande) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter) bpy.ops.transform.resize(value=(0.1,0.1,0.1)) bpy.ops.mesh.extrude_region_move() bpy.ops.mesh.merge(type='CENTER', uvs=False) bpy.ops.mesh.select_all(action='SELECT') normInside = toit < 0.5 bpy.ops.mesh.normals_make_consistent(inside=False) bpy.ops.object.mode_set(mode='OBJECT') def dessine_tours(hauteur_etage = 2,hauteur_inter_etage = 1,profondeur=0.8,nb_etage = 10,toit = 1): etage = Vector((0,0,hauteur_etage)) inter = Vector((0,0,hauteur_inter_etage)) shrink = (profondeur,profondeur,profondeur) expande = (1 / profondeur,1 / profondeur,1 / profondeur) bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.edge_face_add() bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter * 0.4) bpy.ops.transform.resize(value=(0.1,0.1,0.1)) bpy.ops.mesh.extrude_region_move() for i in range(nb_etage - 3 ): bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=etage ) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter) bpy.ops.transform.resize(value=(5.0,5.0,5.0)) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter/2) bpy.ops.mesh.extrude_region_move() bpy.ops.transform.translate(value=inter) bpy.ops.transform.resize(value=(0.1,0.1,0.1)) 
bpy.ops.mesh.merge(type='CENTER', uvs=False) bpy.ops.mesh.select_all(action='SELECT') normInside = toit < 0.5 bpy.ops.mesh.normals_make_consistent(inside=False) bpy.ops.object.mode_set(mode='OBJECT') def correct_normal_building(obj): x_normal,y_normal,z_normal = obj.data.polygons[0].normal if z_normal > 0 : bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.normals_make_consistent(inside=True) bpy.ops.object.mode_set(mode='OBJECT') def draw_parcel_with_function(polygone,name,darw_func,shrink = 0.7,variation_profondeur_etage = 0.2,shrink_toit = 1 , nb_etage =1): '''nb_verts = len(polygone) edges =[] for i in range(nb_verts): edges.append((i,(i+1) % nb_verts)) mesh = bpy.data.meshes.new('polygone_' + name) mesh.from_pydata(polygone, edges, []) mesh.update() obj,base = add_obj(mesh, bpy.context) centerposition = average_position(polygone) select_obj(obj,base,mesh) bpy.context.scene.cursor_location = centerposition bpy.ops.object.origin_set(type='ORIGIN_CURSOR') ''' bpy.ops.transform.resize(value=(shrink, shrink, shrink)) etage = nb_etage if(etage > 0): profondeur_ = random()/4 + 0.75 darw_func(nb_etage = etage,profondeur = profondeur_, toit = shrink_toit) obj = bpy.context.selected_objects[0] correct_normal_building(obj) '''deselect_obj(base,mesh) return obj,base''' def aply_drawing_function(drawing_function ,bat_name, hauteur_etage ,hauteur_inter_etage,reduction_initial ,profondeur,nb_etage ,toit ): obj_select_list = [] for obj in bpy.context.selected_objects: obj_select_list.append(obj) obj.select = False for obj in obj_select_list: obj.select = True bpy.context.scene.objects.active = obj polygon = get_poly_from_object(obj) draw_parcel_with_function(polygon,bat_name,drawing_function,reduction_initial,profondeur,toit,nb_etage) obj.select = False for obj in obj_select_list: obj.select = True def dessine_polygone_parcel(polygone , name,shrink = 0.7 , variation_profondeur_etage = 0.2 , shrink_toit = 1 , nb_etage = 1 , h_etage = 2 , h_inter = 1): nb_verts = len(polygone) 
edges =[] for i in range(nb_verts): edges.append((i,(i+1) % nb_verts)) mesh = bpy.data.meshes.new(name) mesh.from_pydata(polygone, edges, []) mesh.update() obj,base = add_obj(mesh, bpy.context) centerposition = average_position(polygone) select_obj(obj,base,mesh) bpy.context.scene.cursor_location = centerposition bpy.ops.object.origin_set(type='ORIGIN_CURSOR') bpy.ops.transform.resize(value=(shrink, shrink, shrink)) etage = nb_etage if(etage > 0): profondeur_ = random()/4 + 0.75 dessine_batiment(nb_etage = etage,profondeur = profondeur_, toit = shrink_toit,hauteur_etage = h_etage ,hauteur_inter_etage = h_inter) correct_normal_building(obj) deselect_obj(base,mesh) return obj,base ############################################################################################################### # # ############################################################################################################### def dessine_ville(polygone_englobant = [] , tPoly = [],nb_centre_activite = 1 ,nb_etage_min =1,nb_etage_max=30,shrink_parcel=0.7,isWireFrame = False,hauteur_etage = 3 , profondeur_etage=0.8,variation_profondeur_etage=0.2,shrink_toit = -1,seed_ = 42,percentage_missing = 0.05): seed(seed_) bpy.ops.object.select_all(action = 'DESELECT') centre_ville = average_position(polygone_englobant) tCentreVilles = [get_random_point_in_bounds(resize_polygone_from_center(polygone_englobant,0.6) ) for i in range(nb_centre_activite)] indice = 0 if isWireFrame : for pol in tPoly : dessine_polygone(pol,'wirePoly') indice = indice + 1 for centre in tCentreVilles: bpy.ops.mesh.primitive_cube_add(location = centre) return list_indice_length = [] for i in range(len(polygone_englobant)): v1 = polygone_englobant[i] v2 = polygone_englobant[(i+1) % len(polygone_englobant) ] length = ((v1.x - v2.x)*(v1.x - v2.x)) + ((v1.y - v2.y) * (v1.y - v2.y)) list_indice_length.append((i,length)) list_indice_length.sort(key=lambda side: -side[1]) max_distance = math.sqrt(list_indice_length[0][1]) * 0.5 
max_function = nb_etage_max-nb_etage_min coeff_decrease = 0.03 inflexion_coef = max_distance / 3 print(max_distance) index = 0 for polygone in tPoly: index = index +1 if (len(polygone) < 4): polygone = arrange_triangle(polygone) if random() < 1 - percentage_missing: center_polygone = average_position(polygone) d_pole_ville = 100000000 distance_temp = 0 for centreVille in tCentreVilles: distance_temp = (centreVille - center_polygone).length if distance_temp < d_pole_ville : d_pole_ville = distance_temp distance_centreville = d_pole_ville n_etage = int(max_function*(-math.atan(coeff_decrease*(distance_centreville -inflexion_coef))/math.pi+1/2)) + 1 + nb_etage_min if n_etage > 0: n_etage = int(random() * (n_etage - 1)) + 2 toit = random() if shrink_toit < 0 else shrink_toit percentage_etage = random() * 0.6 + 0.2 h_etage = hauteur_etage * percentage_etage h_inter_etage = hauteur_etage - h_etage dessine_polygone_parcel(polygone,'polygon',shrink = shrink_parcel,variation_profondeur_etage = shrink_parcel,shrink_toit = toit ,nb_etage = n_etage , h_etage = h_etage , h_inter = h_inter_etage) print (str((1.0 *index) / len(tPoly))) ############################################################################################################### # # ############################################################################################################### def dessine_ville_from_list(tPoly = [],uptownPosition = Vector((0,0,0)) ,influence_zone = 251025,nb_etage_min =1,nb_etage_max=35,shrink_parcel=0.7,hauteur_etage = 3,profondeur_etage=0.8,variation_profondeur_etage=0.2,shrink_toit = -1,seed_ = 42): seed(seed_) bpy.ops.object.select_all(action = 'DESELECT') centre_ville = uptownPosition max_distance = math.sqrt(influence_zone) max_function = nb_etage_max-nb_etage_min coeff_decrease = 0.03 inflexion_coef = max_distance / 3 index = 0 for polygone in tPoly: index = index +1 if (len(polygone) < 4): polygone = arrange_triangle(polygone) center_polygone = average_position(polygone) 
distance_centreville = (centre_ville - center_polygone).length n_etage = int(max_function*(-math.atan(coeff_decrease*(distance_centreville -inflexion_coef))/math.pi+1/2)) + 1 + nb_etage_min if n_etage > 0: n_etage = int(random() * (n_etage - 1)) + 2 toit = random() if shrink_toit < 0 else shrink_toit percentage_etage = random() * 0.6 + 0.2 h_etage = hauteur_etage * percentage_etage h_inter_etage = hauteur_etage - h_etage dessine_polygone_parcel(polygone,'polygon',shrink = shrink_parcel,variation_profondeur_etage = shrink_parcel,shrink_toit = toit ,nb_etage = n_etage , h_etage = h_etage , h_inter = h_inter_etage) print (str((1.0 *index) / len(tPoly))) def dessin_ville_from_selection(uptown_Position = Vector((0,0,0)), nEtageMin = 1 , nEtageMax = 35 , shrinkParcel = 0.7 , hauteurEtage = 3 , profondeurEtage = 0.8): poly_list = [] for obj in bpy.context.selected_objects: p = get_poly_from_object(obj) if(p != []): poly_list.append(p) obj.select = False dessine_ville_from_list(tPoly = poly_list, uptownPosition = uptown_Position , nb_etage_min = nEtageMin , nb_etage_max = nEtageMax , shrink_parcel = shrinkParcel , hauteur_etage = hauteurEtage ,profondeur_etage = profondeurEtage ) ############################################################################################################### # # ############################################################################################################### def basic_main(factorPoly = True ,isOnlyPoly = True , nUptown = 2 , floor_height = 3 , floor_depth = 0.8 , size_building = 0.7 , percentage_missing_building = 0.05): print('debut') poly = generate_polygon(Vector((0,0,0)),175,5) poly = resize_polygone_from_center(poly,factorPoly) tpoly = [poly] print("air total : " +str(area(poly))) tpoly = subdivide_until_area(poly,450) nb_poly = len(tpoly) print('subdivision terminee , nb poly : ' + str(nb_poly)) dessine_ville(polygone_englobant = poly,tPoly = tpoly , isWireFrame = isOnlyPoly , nb_etage_max = 35 , nb_centre_activite = 
nUptown , percentage_missing = percentage_missing_building , shrink_parcel = size_building , hauteur_etage = floor_height ,profondeur_etage = floor_depth) print('subdivision terminee , nb poly : ' + str(nb_poly)) ########################################################################################### # # ########################################################################################### def initSceneProperties(scn): bpy.types.Scene.nbEtage = IntProperty( name = "nb etage", description = "multiply the begining polygon", min = 1, max = 35) scn['nbEtage'] = 10 ######################################################################################## bpy.types.Scene.tailleEtage = FloatProperty( name = "taille etage", min = 0, max =10) scn['tailleEtage'] = 2.0 bpy.types.Scene.tailleInter = FloatProperty( name = "taille interetage", min = 0, max = 10) scn['tailleInter'] = 1.0 bpy.types.Scene.profonfeur = FloatProperty( name = "profondeur etage", min = 0.1, max = 1) scn['profonfeur'] = 0.8 bpy.types.Scene.taille_rue = FloatProperty( name = "parcel occupation", min = 0.1, max = 1) scn['taille_rue'] = 0.8 bpy.types.Scene.nbEdges = IntProperty( name = "edges", description = "number of edges in polygone", min = 3, max = 100) scn['nbEdges'] = 5 bpy.types.Scene.fieldRadius = IntProperty( name = "radius", description = "radius of the field", min = 1, max = 1000) scn['fieldRadius'] = 175 ######################################################################################## bpy.types.Scene.FactorPolyBegin = IntProperty( name = "multiply Factor polygone", description = "multiply the begining polygon", min = 1, max = 20) scn['FactorPolyBegin'] = 1 bpy.types.Scene.NbUpTownCenter = IntProperty( name = "upTown Centers", description = "number of activity center in the city", min = 1, max = 10) scn['NbUpTownCenter'] = 2 bpy.types.Scene.ceil_height = FloatProperty( name = "height of each floor", min = 0.1, max = 50.0) scn['ceil_height'] = 3.0 bpy.types.Scene.percentage_missing 
= FloatProperty( name = "percentage missing", min = 0.0, max = 1.0) scn['percentage_missing'] = 0.05 bpy.types.Scene.BoolOnlyPoly = BoolProperty( name = "only polygon", description = "only draw the base polygon of the city") scn['BoolOnlyPoly'] = False ######################################################################################## bpy.types.Scene.minArea = IntProperty( name = "minimal subdivision area", description = "a polygon under this area won't be divide", min = 0, max = 5000) scn['minArea'] = 450 ########################################################################################### # generate city from nothing interface # ########################################################################################### class LayoutCityGeneratorPanel(bpy.types.Panel): """Creates a Panel in the scene context of the properties editor""" bl_label = "City Generator" bl_idname = "SCENE_PT_layout" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' def draw(self, context): layout = self.layout scene = context.scene box = layout.box() box.prop(scene, 'BoolOnlyPoly') box.prop(scene, 'FactorPolyBegin') box.prop(scene , 'NbUpTownCenter') box.prop(scene , 'ceil_height') box.prop(scene , 'percentage_missing') # Big render button box.operator("my.generator") class GenerateBigCity(bpy.types.Operator): bl_idname = "my.generator" bl_label = "Generate City" def execute(self, context): onlyPoly = context.scene['BoolOnlyPoly'] factorPolyBegin = context.scene['FactorPolyBegin'] nbActivityCenter = context.scene['NbUpTownCenter'] floorHeight = context.scene['ceil_height'] percentage = context.scene['percentage_missing'] basic_main(factorPoly = factorPolyBegin ,isOnlyPoly = onlyPoly , nUptown = nbActivityCenter, floor_height = floorHeight, percentage_missing_building = percentage) return{'FINISHED'} ########################################################################################### # create field interface # 
########################################################################################### class LayoutCreateFieldPanel(bpy.types.Panel): """Creates a Panel in the scene context of the properties editor""" bl_label = "Field Creator" bl_idname = "SCENE_PT_layout_field_creator" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' def draw(self, context): layout = self.layout scene = context.scene layout.prop(scene, 'nbEdges') layout.prop(scene, 'fieldRadius') layout.label(text="Generate the Field polygon") row = layout.row() row.operator("my.polygoncreator") class CreateFieldPolygonSelecterOp(bpy.types.Operator): bl_idname = "my.polygoncreator" bl_label = "create field" def execute(self, context): nb_edge = context.scene['nbEdges'] radius = context.scene['fieldRadius'] center = Vector((0,0,0)) poly = generate_polygon(center , radius , nb_edge) dessine_polygone(poly,'poly') return{'FINISHED'} ########################################################################################### # divide polygon interface # ########################################################################################### class LayoutDividePanel(bpy.types.Panel): """Creates a Panel in the scene context of the properties editor""" bl_label = "City Divider" bl_idname = "SCENE_PT_layout_divider" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' def draw(self, context): layout = self.layout scene = context.scene layout.prop(scene, 'minArea') layout.label(text="Divide") col = layout.column(align = True) col.operator("my.simpledivider") col.operator("my.fromcenterdivider") col.operator("my.evendivider") class DivideNormalPolygonSelecterOp(bpy.types.Operator): bl_idname = "my.simpledivider" bl_label = "Simple" def execute(self, context): min_area = context.scene['minArea'] decoupe_selection(min_area) return{'FINISHED'} class DivideFromCenterPolygonSelecterOp(bpy.types.Operator): bl_idname = "my.fromcenterdivider" bl_label = "Center" def execute(self, context): min_area = context.scene['minArea'] 
decoupe_selection_from_center(min_area) return{'FINISHED'} class DivideEvenlyPolygonSelecterOp(bpy.types.Operator): bl_idname = "my.evendivider" bl_label = "Even" def execute(self, context): min_area = context.scene['minArea'] decoupe_selection_evenly(min_area) return{'FINISHED'} ########################################################################################### # generate city from polygon interface # ########################################################################################### uptown_object = None class GenerateCityFromSelection(bpy.types.Operator): bl_idname = "my.generator_selection" bl_label = "Generate City From Selection" def execute(self, context): global uptown_object clear = lambda: os.system('cls') clear() upTown = uptown_object.location if uptown_object != None else Vector((0,0,0)) print(str(upTown)) dessin_ville_from_selection(upTown) return{'FINISHED'} class PlaceUpTownObject(bpy.types.Operator): bl_idname = "my.uptown_operator" bl_label = "place uptown" def execute(self, context): global uptown_object if uptown_object == None: bpy.ops.mesh.primitive_cube_add() uptown_object = bpy.context.object uptown_object.location = bpy.context.scene.cursor_location return{'FINISHED'} class LayoutgenerateCitySelectionPanel(bpy.types.Panel): """Creates a Panel in the scene context of the properties editor""" bl_label = "Selection City Generator" bl_idname = "SCENE_PT_layout_selection_city_generator" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' def draw(self, context): layout = self.layout scene = context.scene row = layout.row() row.operator("my.uptown_operator") layout.label(text="Generate city") row = layout.row() row.operator("my.generator_selection") ########################################################################################### # buildings interface # ########################################################################################### class LayoutCreatBatimentPanel(bpy.types.Panel): """Creates a Panel in the scene 
context of the properties editor""" bl_label = "City building generator" bl_idname = "SCENE_PT_layout_test" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' def draw(self, context): layout = self.layout scene = context.scene col = layout.column(align = True) col.prop(scene, 'nbEtage') col.prop(scene, 'tailleEtage') col.prop(scene, 'tailleInter') col.prop(scene, 'profonfeur') col.prop(scene, 'taille_rue') layout.label(text="Generate Building") row = layout.row(align=True) row.operator("my.generatorbatsimple") row.operator("my.house") row.operator("my.tower") class GenerateBatSimple(bpy.types.Operator): bl_idname = "my.generatorbatsimple" bl_label = "Building" def execute(self, context): nbEtage = context.scene['nbEtage'] taille_et = context.scene['tailleEtage'] taille_inter = context.scene['tailleInter'] profondeur = context.scene['profonfeur'] ocupation = context.scene['taille_rue'] aply_drawing_function(dessine_batiment ,"immeuble", taille_et ,taille_inter,ocupation ,profondeur,nbEtage ,1 ) return{'FINISHED'} class GenerateMaison(bpy.types.Operator): bl_idname = "my.house" bl_label = "House" def execute(self, context): nbEtage = context.scene['nbEtage'] taille_et = context.scene['tailleEtage'] taille_inter = context.scene['tailleInter'] profondeur = context.scene['profonfeur'] ocupation = context.scene['taille_rue'] aply_drawing_function(dessine_maison ,"maison", taille_et ,taille_inter,ocupation ,profondeur,nbEtage ,1 ) return{'FINISHED'} class GenerateTour(bpy.types.Operator): bl_idname = "my.tower" bl_label = "Tower" def execute(self, context): nbEtage = context.scene['nbEtage'] taille_et = context.scene['tailleEtage'] taille_inter = context.scene['tailleInter'] profondeur = context.scene['profonfeur'] ocupation = context.scene['taille_rue'] aply_drawing_function(dessine_tours ,"tour", taille_et ,taille_inter,ocupation ,profondeur,nbEtage ,1 ) return{'FINISHED'} ########################################################################################### # 
register # ########################################################################################### @persistent def addon_handler(scene): bpy.app.handlers.scene_update_post.remove(addon_handler) # perform something here, e.g. initialization initSceneProperties(bpy.context.scene) initSceneProperties(bpy.context.scene) def register(): bpy.app.handlers.scene_update_post.append(addon_handler) def unregister(): pass if __name__ == "__main__": # only for live edit. bpy.utils.register_module(__name__)
UTF-8
Python
false
false
2,014
13,262,859,049,984
dcf0b4e939eaafa2793d1979a29e67cb05d10070
be75d06f9392c7bfa4faecc332eee49168accd19
/make_commit.py
cfc0103220498d918a865166efa873893b5937f5
[]
no_license
eoinmurray/rino-app
https://github.com/eoinmurray/rino-app
a88dc30376e76046af52d10b918ff89a9209c6be
17134e5ca6a1aaa142aa72f40436c50f912433ca
refs/heads/master
2016-09-09T13:45:03.922968
2014-06-06T08:50:31
2014-06-06T08:50:31
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import render_template, jsonify, redirect, send_file, Blueprint import os import jstree from rino import rinofile make_commit_page = Blueprint('make_commit_page', __name__, template_folder='templates') @make_commit_page.route('/commit') def make_a_commit(): return render_template("make_commit.html", cwd = os.path.basename(os.getcwd())) @make_commit_page.route('/commit/data/get_rinofile') def get_rinofile(): return jsonify(rinofile.get()) @make_commit_page.route('/commit/data/get_dir') def get_dir(): return jsonify(tree = make_tree(os.getcwd())) def make_tree(path): """http://stackoverflow.com/questions/10961378/how-to-generate-an-html-directory-list-using-python""" tree = dict(name=os.path.basename(path), children=[]) try: lst = os.listdir(path) except OSError: pass #ignore errors else: for name in lst: fn = os.path.join(path, name) if os.path.isdir(fn): tree['children'].append(make_tree(fn)) else: tree['children'].append(dict(name=name, children = [])) return tree
UTF-8
Python
false
false
2,014
16,724,602,662,747
3b7a9ed671b8c764ec56ed8fdc3d9337ce143e6b
a37dcd03535d842746d4b615dad66c1e5e01d836
/fooling/merger.py
eb3283b4bcf2a5a56ff8e93ce7b4b5e90b09c7f8
[]
no_license
yasusii/fooling
https://github.com/yasusii/fooling
47a7ff088ced205a8a29ae2cecc33f85809bf293
6659119730270eba4640b0085b4939dd44975ed8
refs/heads/master
2021-01-10T01:15:00.021696
2010-06-20T10:18:06
2010-06-20T10:18:06
49,402,846
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python ## ## merger.py ## import sys, os, os.path from array import array from struct import pack, unpack import pycdb as cdb from utils import encode_array, decode_array, idx_info from utils import PROP_SENT, PROP_DOCID, PROP_LOC, PROP_IDXINFO __all__ = [ 'Merger' ] # Estimate the number of unique terms: # assuming roughly 25% of the words are common. def estimate_terms(nterms): return reduce(lambda r,n: r + max(n - int(r*0.25), 0), nterms, 0) ## IndexFile ## class IndexFile(object): def __init__(self, fname): self.fname = fname self.cdb = None return def __repr__(self): return '<%s (docs=%d, terms=%d)>' % (self.fname, self.ndocs, self.nterms) def open(self): if not self.cdb: self.cdb = cdb.init(self.fname) (self.ndocs, self.nterms) = idx_info(self.cdb) return def close(self): self.cdb.close() return def assignnewids1(self, newids): self.oldids = [] for (k,v) in self.cdb.iteritems(startkey=pack('>ci', PROP_DOCID, 1)): if k[0] == PROP_LOC: break loc = v[4:] if loc in newids: continue (oldid,) = unpack('>xi', k) self.oldids.append((oldid, loc)) #assert self.ndocs == len(self.oldids), (self.ndocs, self.oldids) for (oldid,loc) in sorted(self.oldids, reverse=True): newid = len(newids) newids[loc] = newid return def assignnewids2(self, newids): self.old2new = {} for (oldid,loc) in self.oldids: self.old2new[oldid] = newids[loc] return def copysents(self, maker): for (k,v) in self.cdb.iteritems(): if k[0] != PROP_SENT: break (oldid, pos) = unpack('>xii', k) if oldid in self.old2new: maker.add(pack('>cii', PROP_SENT, self.old2new[oldid], pos), v) else: raise ValueError('empty index') self.next = self.cdb.iteritems(startkey=k).next return def convertoldids(self, bits): a = decode_array(bits) r = array('i') for i in xrange(0, len(a), 2): if a[i] in self.old2new: r.append(self.old2new[a[i]]) r.append(a[i+1]) return r # cdbmerge def cdbmerge(idxs): q = [] for idx in idxs: try: q.append((idx.next(), idx)) except StopIteration: pass k0 = None vs = None while q: 
q.sort() ((k,v), idx) = q.pop(0) if k0 != k: if vs: yield (k0,vs) vs = [] vs.append((v, idx)) k0 = k try: q.append((idx.next(), idx)) except StopIteration: continue if vs: yield (k0,vs) return ## idxmerge ## ## idxs: a list of indices to merge (the oldest index first). ## def idxmerge(cdbname, idxstomerge, verbose=0): # Count all the unique locations and assign new document ids. idxorder = {} loc2docid = {} for (i,idx) in enumerate(reversed(idxstomerge)): idx.assignnewids1(loc2docid) idxorder[idx] = i n = len(loc2docid) loc2docid = dict( (loc,n-docid) for (loc,docid) in loc2docid.iteritems() ) for idx in idxstomerge: idx.assignnewids2(loc2docid) # Create a new index file. maker = cdb.cdbmake(cdbname, cdbname+'.tmp') if verbose: print >>sys.stderr, 'Merging: %r (docs=%d, est. terms=%d): %r' % \ (cdbname, sum( idx.ndocs for idx in idxstomerge ), estimate_terms( idx.nterms for idx in idxstomerge ), idxstomerge) # Copy sentences to a new index file with unique ids. for idx in idxstomerge: idx.copysents(maker) # Merge document ids and offsets. 
nterms = 0 docid2info = [] for (k,vs) in cdbmerge(idxstomerge): if k[0] == PROP_LOC or k[0] == PROP_IDXINFO: break if k[0] == PROP_DOCID: # read a docid->loc mapping (oldid,) = unpack('>xi', k) for (info,idx) in vs: if oldid not in idx.old2new: continue newid = idx.old2new[oldid] docid2info.append((newid, info)) assert loc2docid[info[4:]] == newid else: # merge docid+pos sets vs = sorted(( (idxorder[idx], idx.convertoldids(v)) for (v,idx) in vs )) ents = sum( len(a) for (_,a) in vs )/2 (_,r) = vs.pop(0) for (_,a) in vs: r.extend(a) maker.add(k, encode_array(ents, r)) nterms += 1 if verbose and nterms % 1000 == 0: sys.stderr.write('.'); sys.stderr.flush() # write docid->loc mappings (avoiding dupes) docid2info.sort() for (docid,info) in docid2info: maker.add(pack('>ci', PROP_DOCID, docid), info) # write loc->docid mappings (avoiding dupes) for (loc,docid) in sorted(loc2docid.iteritems()): if loc: maker.add(PROP_LOC+loc, pack('>i', docid)) if verbose: print >>sys.stderr, 'done: docs=%d, terms=%d' % (len(docid2info), nterms) maker.add(PROP_IDXINFO, pack('>ii', len(docid2info), nterms)) maker.finish() return ## Merger ## class Merger(object): def __init__(self, indexdb, max_docs_threshold=2000, max_terms_threshold=50000, verbose=0): self.indexdb = indexdb self.max_docs_threshold = max_docs_threshold self.max_terms_threshold = max_terms_threshold self.verbose = verbose return def flush(self, idxid, idxstomerge): if not idxstomerge: return fname = self.indexdb.gen_idx_fname(idxid) if 1 < len(idxstomerge): idxmerge(fname+'.new', idxstomerge, self.verbose) for idx in idxstomerge: idx.close() os.rename(idx.fname, idx.fname+'.bak') os.rename(fname+'.new', fname) elif idxstomerge[0].fname == fname: if self.verbose: print >>sys.stderr, 'Remain: %r' % (fname) else: os.rename(idxstomerge[0].fname, fname) if self.verbose: print >>sys.stderr, 'Rename: %r <- %r' % (fname, idxstomerge[0].fname) return def run(self, cleanup=False): # idxs: a list of indices (the oldest index comes 
first). idxs = [ IndexFile(os.path.join(self.indexdb.idxdir, fname)) for fname in reversed(self.indexdb.idxs) ] ndocs = 0 nterms = [] newidx = 0 idxstomerge = [] for idx in idxs: idx.open() if ((self.max_docs_threshold and self.max_docs_threshold < ndocs) or (self.max_terms_threshold and self.max_terms_threshold < estimate_terms(nterms))): self.flush(newidx, idxstomerge) newidx += 1 idxstomerge = [] ndocs = 0 nterms = [] ndocs += idx.ndocs nterms.append(idx.nterms) idxstomerge.append(idx) self.flush(newidx, idxstomerge) if cleanup: for fname in os.listdir(self.indexdb.idxdir): if fname.endswith('.cdb.bak'): fname = os.path.join(self.indexdb.idxdir, fname) if self.verbose: print >>sys.stderr, 'Removing: %r' % fname os.unlink(fname) self.indexdb.refresh() return
UTF-8
Python
false
false
2,010
14,285,061,226,865
92c7f86d634bbfc4ee1ce3f2fbb06b0be4ab4c7c
2ce2667812ad26bbf6c3848354e90e1042a77e1c
/SlapshotParser/models.py
9ca84751d2495bcea88771ba36bbad5f1ef88e39
[]
no_license
soelgary/SlapshotAlerts
https://github.com/soelgary/SlapshotAlerts
ea57f3d9631c8e088d0004b5436fcd33512a9dc8
b9e5fd09ad10664623b1185a6302d81285653cc0
refs/heads/master
2016-09-06T14:20:52.729535
2014-06-04T03:08:34
2014-06-04T03:08:34
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models from SlapshotAlertsHome.models import Page import lxml.etree import lxml.html # Create your models here. class HockeyMonkeyParser(): def __init__(self): self.name_selector = 'td h1.h1navbar' self.price_selector = 'form div[id="ysw-item-price"]' def parse(self, url, request): root = lxml.html.fromstring(request.text) name_list = root.cssselect(self.name_selector) price_list = root.cssselect(self.price_selector) if len(price_list) == 1 and len(name_list) == 1: price = price_list[0].text_content().strip() name = name_list[0].text_content().strip() page = Page(url=url, price=price, name=name) page.save()
UTF-8
Python
false
false
2,014
3,539,053,074,618
76c1b69ef0ed57454850838d1e5e41bcee9ee639
69a4df027a1d2a177367674a37b1ae8edab1bf25
/Products.RelationsLayout/Products/RelationsLayout/skin.py
4cd9fdd6d9d73535ac5bad7bd1d305ba8f7f0b4f
[]
no_license
aktion-hip/silva_tools
https://github.com/aktion-hip/silva_tools
5a9242e78389ccd9019a8e44483ff6d6b2feb398
4e82528a2e252dae99332188e4437eaa59d726d3
refs/heads/master
2020-07-04T05:00:41.504871
2013-10-19T21:02:15
2013-10-19T21:02:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2011 RelationWare, Benno Luthiger. All rights reserved. # See also LICENSE.txt from silva.core.layout.interfaces import ISilvaSkin from silva.core import conf as silvaconf from silva.core.layout.porto.interfaces import IPorto class IRelations(IPorto): """Relations layer used to attach resources. """ silvaconf.resource('relations.css') class IRelationsSkin(IRelations, ISilvaSkin): """Skin for Relations Layout. """ silvaconf.skin('RelationsLayout')
UTF-8
Python
false
false
2,013
2,052,994,382,254
d64e8b8166e2daa3b9a4a4ca8145204f66dc836b
baab1369d82681a1e20e103b0b8ef0347a76ef95
/wiki/tests/__init__.py
170fa3c795f042665d981d67f7cec3815730ee69
[ "GPL-3.0-only" ]
non_permissive
tahnok/wikinotes
https://github.com/tahnok/wikinotes
48b19b4a9055563a06303c14ed3269d40f1eb3b5
41e948031f9bcbc72fdf33695402289e6afcc926
refs/heads/master
2021-01-20T22:50:56.591397
2012-10-27T03:28:12
2012-10-27T03:28:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from wiki.tests.markdown import * """ MORE TESTS TO COME, EVENTUALLY """
UTF-8
Python
false
false
2,012
7,679,401,539,780
9e63e18f8a9d627646aebbf72cb2cdf7de314204
895a625db73c3332da883ebed090c861c50a8d18
/utils/agent.py
df3ab56e91d0d0b507da4fe42ed5bcb4328e3a81
[]
no_license
jul/KISSMyAgent
https://github.com/jul/KISSMyAgent
4e9a30d8b19cf4a8687c17845305d440b60014d4
f281134df555f77fe7b25392cc80e8364f2a29ae
refs/heads/master
2020-06-02T20:21:32.826135
2011-03-06T15:50:12
2011-03-06T15:50:12
314,839
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#i!/usr/bin/python #_*_ unicode _*_ class MetaAgent: stat=None personality="naive" x=None to_debug=True y=None max_id=0 repres=None neighbors=[] id=None memory=dict() transaction_amount=10 personalities=[] utility=50 init_val=dict() max_utility=0 min_utility=0 can_bankrupt=True current_partner=None added_value_for_buyer=1 added_value_for_seller=9 fail_before_bankrupcy = False pers_color_mask= 0x0000FF def __repr__(self): return "<repres : %d,%d,%s>" % (self.x,self.y,self.repres) def coord(self,x=None,y=None): if (x == None and y == None): return (self.x,self.y) else: self.x=x self.y=y def color(self): #self.debug("%d , %d " % (self.utility, self.max_utility) ) assert(self.utility <= MetaAgent.max_utility ) ratio_to_max= 1.0 * ( self.utility - MetaAgent.min_utility ) / (MetaAgent.max_utility - MetaAgent.min_utility ) color = ( ( ( 0x00ff - int ( ( ratio_to_max * 0x00ff ) ) ) << 8 & 0x00ff00 ) | self.pers_color_mask ) #self.debug( " color : #%06x\n" % color) return( "#%06x" % color) def clone(self,**change): arg=dict() for k in self.init_val.keys(): arg[k]=self.init_val[k] for k in change.keys(): arg[k]=change[k] self.debug("avant") c=self.__class__(**arg) c.debug("apres") return c def __init__(self,**settings): MetaAgent.max_id=MetaAgent.max_id+1 self.id=MetaAgent.max_id self.init_val=settings for k in settings.keys(): setattr(self,k,settings[k]) #print "%s->%s" % (k,settings[k]) MetaAgent.max_utility=max(MetaAgent.max_utility, self.utility) MetaAgent.min_utility=min(MetaAgent.min_utility, self.utility) if self.personality not in self.personalities: self.personalities+=[ self.personality ] #print "init %s\n" % self def debug(self,msg=""): if self.to_debug: msg= "%s(%d,%d) %s :utilisty:%s::%s" % ( self.id, self.x, self.y, self.personality, self.utility, msg) print msg def choose_neighbor(self): "choix par defaut=au hasard" from random import choice self.current_partner=choice(self.neighbors) return self.current_partner def deal_with(self,partner): ## est ce que l'on 
veut faire du commerce avec le voisin ? if( not partner ): return 0 self.debug("amount before transaction with %d " % ( partner.id) ) ## on retire le montant de la transaction self.utility-=self.transaction_amount ### on recupere le montant de la transaction + bonus .... ### eventuellement self.utility+=partner.transaction(self.transaction_amount) self.debug("utility after transaction : %d" % self.utility) MetaAgent.max_utility=max(MetaAgent.max_utility, self.utility) MetaAgent.min_utility=min(MetaAgent.min_utility, self.utility) def interaction(self,agent=None): if self.can_bankrupt and self.utility <= 0: self.current_partner = None return False self.current_partner = agent if agent else self.choose_neighbor() self.deal_with(self.current_partner) def transaction(self,amount): ### I am the buyer if self.can_bankrupt and self.utility <= amount: return 0 if self.fail_before_bankrupcy else amount self.utility -= amount self.utility += self.added_value_for_buyer self.debug("transaction made I give %d" % ( amount + self.added_value_for_seller) ) ### je prend de mon larre feuille ### et je paie return self.added_value_for_seller + amount def __str__(self): msg=u"" if self.utility: msg+="%d,%d//id=%d//utility:%s//pers:%s\n" % (self.x, self.y,self.id, str(self.utility),self.personality) return msg class ToutPourMaGueule(MetaAgent): def __init__(self,**kwargs): kwargs["personality"]="ToutPourMaGueule" kwargs["pers_color_mask"]=0xCC0000 self=MetaAgent.__init__(self,**kwargs) def choose_neighbor(self): self.debug("on fait pas de commerce") self.current_partner=None return False def deal_with(self,agent=None): self.debug("Pas de deal") return False def transaction(self,amount): self.utility+=amount MetaAgent.max_utility=max(MetaAgent.max_utility, self.utility) MetaAgent.min_utility=min(MetaAgent.min_utility, self.utility) self.debug("je prend mais ne rends pas") return 0 class BoyScout(MetaAgent): def __init__(self,**kwargs): kwargs["personality"]="Confiant" 
MetaAgent.__init__(self,**kwargs)
UTF-8
Python
false
false
2,011
13,752,485,311,453
57ff70d59619efa160aad1dfc3895d1286b3618b
ef3c541d9b3818a0976988d55e8f35dec3ddad80
/model/test.py
4931b6cea3c156e6c6622fe684e13e8bdb878cb9
[]
no_license
kislayraj/restfull-web-service-python-tornado
https://github.com/kislayraj/restfull-web-service-python-tornado
df555aff9609d29639c815003f213128a4cc33b6
b20c698c541d69ee306c86d2b6b1e90199a7fbf2
refs/heads/master
2021-01-18T07:46:39.446452
2013-10-15T01:44:34
2013-10-15T01:44:34
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# This Python file uses the following encoding: utf-8 #!/bin/env python #-*-coding:utf-8-*- #coding = utf-8 from table import Table class Test(Table): """docstring for Test""" _name = 'test' _primary = 'id' _dataValidate = { 'name':{ 'isNotEmpty':{'msg':'密码不能为空', 'code':1001}, 'isUserName':{'msg':'用户名不符合规则', 'code':1002}, }, 'pwd':{ 'isNotEmpty':{'msg':'密码不能为空', 'code':1003}, }, } def __init__(self, db): '''connect to database''' self.db = db def commit(self): try: sql = [ "update test set pwd = 'aaaaa' where id = 1", "insert test(name, pwd) values('aaaaaa', '123456')", "insert partner(`member_id`, `partner`, `key`) values(1, 'aaaaa', '123456')" ] status = self.db.transaction(sql) return status except Exception, e: return e
UTF-8
Python
false
false
2,013
4,286,377,381,604
02233142cc222488722d3d478c05bd8df907c6b9
82256676fd6b857b7a9fd54ab339d390c1364ab0
/h2o-perf/bench/py/h2oPerf/Signal.py
9e5f49a79f5fb6f242baf0e46b925e245538a5ff
[ "Apache-2.0" ]
permissive
ivanliu1989/h2o
https://github.com/ivanliu1989/h2o
8d0def46c070e78718ba13761f20ef2187545543
e00b367df0a33c400ae33bc869a236f254f625ed
refs/heads/master
2023-04-27T20:40:16.666618
2014-10-23T02:12:36
2014-10-23T02:12:36
25,618,199
0
0
Apache-2.0
true
2023-04-15T23:24:35
2014-10-23T03:49:40
2014-10-22T21:47:32
2023-04-15T23:24:32
616,647
0
0
2
null
false
false
from math import sqrt class Signal: """ A class that represents a signal lagged by amount 'order'. A signal's 'order' is how much of its history is tracked. This is also known as the lag. The first element in the signal list is the most recent value of the signal. """ def __init__(self, order): self.signal = [] self.order = order def add(self, sig): """ Prepend an incoming signal value to the signal history. """ if self.length() < self.order: self.signal.insert(0, sig) else: self.pop() self.signal.insert(0, sig) def pop(self): """ Pop the last element of the list. """ del self.signal[-1] def length(self): """ Remove the last element of the list. """ return len(self.signal) def report(self): """ A debugging function to report information on the signal. """ print str(self.signal), str(self.can_use()) def can_use(self): """ A signal may be used once it's length is equal to its order. """ return len(self.signal) == self.order def mean(self): """ Compute the mean of the signal. """ return sum(self.signal) * 1. / (1. * self.length()) def sigma(self): """ Compute the standard deviation of the signal. """ ybar = self.mean() num = sum([(y - ybar) ** 2 for y in self.signal]) * 1.0 denom = self.length() * 1. if denom <= 0: raise Exception("DIVISION BY ZERO") return sqrt(num / denom) @staticmethod def run_test(): """ A unit test for the Signal class. """ x = range(20) x2 = [0.123, 232.13, 21938, 2341, 214, 124, 214, 9291, 12313, 8237.123, 23, 14, 21, 3, 4, 6, 7, 8, 1293, 10191] s1 = Signal(5) s2 = Signal(1) for i in x[::-1]: s1.add(i) s1.report() for i in x2[::-1]: s2.add(i) s2.report()
UTF-8
Python
false
false
2,014
2,997,887,223,742
372808e609a2ee06f2c090088f73ccecc76f2f1b
291b2f8a5ad2e61c3eb072451b8630a0c1585ac5
/tools/cache.py
83757f2a69f4db84d629bf0d4124bdeb14f32e13
[ "BSD-3-Clause" ]
permissive
linpawslitap/mds_scaling
https://github.com/linpawslitap/mds_scaling
64bf618f5c2f9c399d66a33b4fab3d94fe90e528
c7edfa5dcdf64a579f92580f034a079489d20cd1
refs/heads/master
2021-01-25T04:02:27.184395
2014-03-11T20:19:14
2014-03-11T20:19:14
4,360,844
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python ######################################################################### # Author: Kai Ren # Created Time: 2013-10-27 20:09:01 # File Name: ./latency.py # Description: ######################################################################### class CacheEntry: def __init__(self, path): self.next_entry = None self.prev_entry = None self.path = path class Cache: def __init__(self, size): self.entry = {} self.head = None self.tail = None self.limit = size def __pop(self): if self.head is None: return item = self.head self.entry.pop(item.path) self.head = item.next_entry if self.head is not None: self.head.prev_entry = None else: self.tail = None def __append(self, item): if self.tail is None: self.head = item self.tail = item else: self.tail.next_entry = item item.prev_entry = self.tail item.next_entry = None self.tail = item def __delete(self, item): if self.head == item: self.head = item.next_entry if self.head is not None: self.head.prev_entry = None elif self.tail == item: self.tail = item.prev_entry if self.tail is not None: self.tail.next_entry = None item.prev_entry = None item.next_entry = None return else: item.prev_entry.next_entry = item.next_entry item.next_entry.prev_entry = item.prev_entry item.prev_entry = None item.next_entry = None return def put(self, path): if path in self.entry: return if len(self.entry) == self.limit: self.__pop() item = CacheEntry(path) self.__append(item) self.entry[path] = item def get(self, path): if path not in self.entry: return None item = self.entry[path] if self.tail == item: return item self.__delete(item) self.__append(item) return item def evict(self, path): if path not in self.entry: return item = self.entry[path] self.__delete(item) self.entry.pop(path) def execute(cache, cmd, path): global lookup_cnt, hit_cnt, write_cnt path_comp = path.split('/') father = '0' lookup_cnt += len(path_comp) - 1 for node in path_comp[1:-1]: key = father+'/'+node if cache.get(key) is None: cache.put(key) else: hit_cnt 
+= 1 father = node node = path_comp[-1] key = father+'/'+node if cmd == 'rename' or cmd == 'setPermission' or cmd == 'delete': cache.evict(key) write_cnt += 1 else: lookup_cnt += 1 if cache.get(key) is None: cache.put(key) else: hit_cnt += 1 lookup_cnt = 0 hit_cnt = 0 write_cnt = 0 cache = Cache(10000) f = open('/media/ssd/linkedin/trace0.log') lcnt = 0 for l in f: lcnt += 1 if lcnt % 1000000 == 0: print lcnt print float(hit_cnt)/float(lookup_cnt) ls = l.split() op = ls[2] src = ls[3] dst = ls[4] if op in ['rename']: execute(cache, op, src) execute(cache, 'create', dst) else: execute(cache, op, src) f.close() print lookup_cnt, hit_cnt, write_cnt
UTF-8
Python
false
false
2,014
8,615,704,418,165
50a7ad5e93500a12c548f42092a6ec35b50cc05b
c7b95a2070b2e34052f738e44ed37db298c316ca
/novel/cluster/ClusterModule.py
685d82d586b82abd613b8cac2665dfd43664f170
[]
no_license
sunhaowen/NovelData
https://github.com/sunhaowen/NovelData
9423238b55c7bfbdc868619a820a89cda03feb7e
ab72a029739808901401efb4911ffb742e31417b
refs/heads/master
2016-09-05T15:26:35.192714
2014-06-04T03:17:59
2014-06-04T03:17:59
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding:GBK __author__ = 'sunhaowen' __date__ = '2014-02-17 00:40' from collections import defaultdict from ConfigParser import SafeConfigParser from novel.cluster.ClusterDB import * from public.DisjointSet import * def here(): print('PrimeMusic') class ClusterModule(object): """ """ def __init__(self): """ """ self.logger = logging.getLogger('novel.cluster') self.err = logging.getLogger('err.cluster') def novel_node_check(self, novel_node_list): """ """ cluster_dict = {} for (gid, rid, site_status) in novel_node_list: if cluster_dict.has_key(gid): if cluster_dict[gid] != rid: self.err.warning('gid: {0}, rid: {1}, rid: {2}'.format(gid, cluster_dict[gid], rid)) else: cluster_dict[gid] = rid def novel_node_collection(self): """ """ cluster_db = ClusterDBModule() novel_node_list = cluster_db.get_noveldata_all('novel_cluster_dir_info_offline', ['gid', 'rid', 'site_status']) #self.novel_node_check(novel_node_list) self.logger.info('novel node number: {0}'.format(len(novel_node_list))) disjoint_set = DisjointSet() for (gid, rid, site_status) in novel_node_list: disjoint_set.add_novel_node(gid, site_status) def novel_edge_collection(self): """ """ cluster_db = ClusterDBModule() novel_edge_list = cluster_db.get_noveldata_all('novel_cluster_edge_info_offline', ['gid_x', 'gid_y']) self.logger.info('novel edge number: {0}'.format(len(novel_edge_list))) disjoint_set = DisjointSet() for (gid_x, gid_y) in novel_edge_list: disjoint_set.merge(gid_x, gid_y) def novel_cluster_update(self): """ """ disjoint_set = DisjointSet() update_tuple_list = disjoint_set.generate_update_tuple_list() self.logger.info('novel cluster update number: {0}'.format(len(update_tuple_list))) cluster_db = ClusterDBModule() for (gid, rid) in update_tuple_list: self.logger.info('gid: {0}, rid: {1}'.format(gid, rid)) cluster_db.update_novelclusterdirinfo_gid(gid, rid) def run(self): """ """ self.logger.info('novel cluster module start') self.novel_node_collection() 
self.novel_edge_collection() self.novel_cluster_update() self.logger.info('novel cluster module end') return True if __name__ == '__main__': here()
UTF-8
Python
false
false
2,014
14,972,256,006,355
d02e789b84d22056a1056eb6519f64992cec8844
f98e74162ca9c06df65f74440206dd4aa65cbf3f
/cachebustah.py
ba8ff72d78845bd28dc4c3ff0ca80e9d0da6f773
[]
no_license
sqor/Warmup
https://github.com/sqor/Warmup
d588ea4331065a3b0012ab6287aa618080b3e9eb
9db3306b134a0825de2a82d087588a7e98191a9b
refs/heads/master
2020-09-24T12:27:01.149486
2014-03-18T20:39:21
2014-03-18T20:39:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import random import string newLines = [] localLines = [] START_JS_CACHE = "<!--CACHE-BUSTAH-START-->" END_JS_CACHE = "<!--CACHE-BUSTAH-END-->" def get_rand_str(): char_set = string.ascii_uppercase + string.digits return ''.join(random.sample(char_set*6, 6)) def read_file(): IS_LOCAL = False lines = open( "index_pre_prodified.html", "r" ) array = [] rand_ = get_rand_str() for line in lines: # Next we make sure to now ignore all the local script tags if line.find(START_JS_CACHE) >=0: IS_LOCAL = True line = "" if line.find(END_JS_CACHE) >=0: IS_LOCAL = False line = "" line = line.replace("main.css", "main.css?" + str(rand_ ) + "=" + str(rand_)); if IS_LOCAL: line = line.replace(".js", ".js?" + str(rand_ ) + "=" + str(rand_) ); newLines.append( line ) else: newLines.append( line ) # Read file and parse read_file() # Write File f = open('index.html','w') for line in newLines: f.write(line) f.close()
UTF-8
Python
false
false
2,014
12,524,124,636,242
573b035a58dec6c1016a30279eaec08f983f7ab8
8aaa7c9fd2238fef90b0a3f8ef2332a79cadca9a
/db/generate.py
1b1e2ad2677e2aa7ee760387e9222c654ae20aa8
[]
no_license
nromashchenko/bmstu
https://github.com/nromashchenko/bmstu
ea6cb381d48f17d0d8d34b61cb5512126ed83917
69c9d8bfe82b16b420719f080bfdc6867e97bb16
refs/heads/master
2015-08-04T20:42:00.938051
2013-05-15T07:37:30
2013-05-15T07:37:30
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import random class Generator: first_name = [u"Иван", u"Николай", u"Алексей"] last_name = [u"Иванов", u"Петров", u"Кузнецов"] patronymic = [u"Моисеевич", u"Эдуардович"] department = [u"Отдел вычислительных систем", "Бухгалтерия", u"Отдел технадзора"] product_name = [u"Canon CanoScan 382", u"Epson Perfection HD", u"HP ScanJet", u"Xerox DocuMate 700", u"Epson GT-S55", u"Epson WorkForce DS-30", u"Epson LQ-2190"] def get_name(self): result = self.__get(self.first_name) + " " result += self.__get(self.last_name) + " " result += self.__get(self.patronymic) return result def get_age(self): return random.randint(18, 65) def get_department(self): return self.__get(self.department) def get_product_name(self): return self.__get(self.product_name) def get_product_number(self): return random.randint(1000000, 9999999) def __get(self, source): length = len(source) index = random.randint(0, length-1) return source[index]; if __name__ == "__main__": gen = Generator() for i in range(1, 10): name = gen.get_name() print name
UTF-8
Python
false
false
2,013
8,443,905,734,448
24d4df5d55a92536ff954645a0a092a762cb690b
07fd0b80a4d12c1de6591e26a6bed285f2a05400
/src/sc/base/uploader/utility.py
48ac3cfb085a78cae9b9619d2ec22cfe426529e8
[]
no_license
simplesconsultoria/sc.base.uploader
https://github.com/simplesconsultoria/sc.base.uploader
ccc5c808c89c9a682d7fde481d863fbac8ebf6c9
c02a3b95ab08eee777d9031a895de754c773e4b8
refs/heads/master
2021-01-10T03:18:45.788037
2013-03-05T19:58:05
2013-03-05T19:58:05
8,558,080
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding:utf-8 -*- from collective.zipfiletransport.utilities import interfaces as zpinterfaces from os import close from os.path import split, splitext from plone.i18n.normalizer.interfaces import IURLNormalizer from Products.Archetypes.utils import shasattr from Products.ATContentTypes import interfaces from Products.CMFCore.utils import getToolByName from tempfile import TemporaryFile from urllib import unquote from zipfile import ZipFile, ZIP_DEFLATED from zope.component import queryUtility from zope.interface import implements from zope.site.hooks import getSite import unicodedata class ZipFileTransportUtility(object): """ ZipFileTransport Utility """ implements(zpinterfaces.IZipFileTransportUtility) # Import content to a zip file. # # file - The file input is a string of the full path name where # the zip file is saved. # context - Context refers to the container object where the objects # will be uploaded to. # desc - Description is the description to be attached to the # uploaded objects. # contributors - Contributors is the contributors message to be attached # to the uploaded objects. def importContent(self, file, context, description=None, contributors=None, categories=None, overwrite=False, excludefromnav=False): """ Import content from a zip file, creating the folder structure within a ZODB hierarchy. """ self.bad_folders = [] zf = ZipFile(file, 'r') files = [f.filename for f in zf.filelist] if len(files) < 1: return ('failure', 'The zip file was empty') for current_file in files: # If the current file is a folder move to the next file. 
if current_file[-1] == '/': continue if current_file[0] == '/': path_as_list = current_file[1:].split('/') else: path_as_list = current_file.split('/') file_name = self._convertToUnicode(path_as_list[-1]) file_name = unicodedata.normalize('NFC', file_name) normalizer = queryUtility(IURLNormalizer) normalized_file_name = normalizer.normalize(file_name) # Checks to make sure that the file path does not contain any # previouslsy found bad folders. if not self._checkFilePath(current_file, path_as_list): continue folder = self._createFolderStructure(path_as_list, context, excludefromnav) # no folder to add to? Then move on to next object. if not folder: continue id_available = folder.checkIdAvailable(id=normalized_file_name) # Create an object if everything looks good if id_available or overwrite: fdata = zf.read(current_file) if not id_available: folder.manage_delObjects([normalized_file_name]) obj = self._createObject(normalized_file_name, fdata, folder) obj.setTitle(file_name) if hasattr(obj, 'description') and description: obj.setDescription(description) if hasattr(obj, 'contributors') and contributors: obj.setContributors(contributors) if hasattr(obj, 'subject') and categories: obj.setSubject(categories) if excludefromnav: obj.setExcludeFromNav(True) obj.reindexObject() zf.close() def _checkFilePath(self, current_file, path_as_list): """ Make sure file isn't in a bad folder, if it is skip to the next one. 
""" for bad_folder in self.bad_folders: if current_file.find(bad_folder) == 0: return False return True def _createFolderStructure(self, path_as_list, parent, excludefromnav): """ Creates the folder structure given a path_part and parent object """ props = getToolByName(parent, 'portal_properties') folder_type = props.zipfile_properties.folder_type catalog = getToolByName(parent, 'portal_catalog') factory = getToolByName(parent, 'portal_factory') file_name = self._convertToUnicode(path_as_list[-1]) file_name = unicodedata.normalize('NFC', file_name) # Create the folder structure for i in range(len(path_as_list) - 1): path_part = self._convertToUnicode(path_as_list[i]) path_part = unicodedata.normalize('NFC', path_part) normalized_path_part = \ queryUtility(IURLNormalizer).normalize(path_part) current_path = '/'.join(path_as_list[:i + 1]) # If in the current folder, then just get the folder if normalized_path_part not in parent.objectIds(): # Checks to make sure that the folder is valid. 
if not parent.checkIdAvailable(id=normalized_path_part): self.bad_folders.append(current_path) return None parent.invokeFactory(type_name=folder_type, id=normalized_path_part) foldr = getattr(parent, normalized_path_part) foldr.setTitle(path_part) if excludefromnav: foldr.setExcludeFromNav(True) foldr = factory.doCreate(foldr, normalized_path_part) catalog.reindexObject(foldr, catalog.indexes()) else: foldr = getattr(parent, normalized_path_part) parent = foldr return parent def _createObject(self, filepath, fdata, parent): """ """ props = getToolByName(parent, 'portal_properties') image_type = props.zipfile_properties.image_type file_type = props.zipfile_properties.file_type doc_type = props.zipfile_properties.doc_type #folder_type = props.zipfile_properties.folder_type mt = parent.mimetypes_registry ext = filepath.split('.')[-1] ext = ext.lower() ftype = mt.lookupExtension(ext) if ftype: mimetype = ftype.normalized() newObjType = self._getFileObjectType(ftype.major(), mimetype) else: newObjType = self._getFileObjectType('application', 'application/octet-stream') mimetype = 'application/octet-stream' nm = filepath.split('/') if nm[-1]: filename = nm[-1] else: filename = nm[0] if filename not in parent.objectIds(): parent.invokeFactory(type_name=newObjType, id=filename) obj = getattr(parent, filename) obj.setTitle(splitext(filename)[0]) else: obj = getattr(parent, filename) if newObjType == image_type: obj.setImage(fdata) elif newObjType == doc_type: obj.setText(fdata) elif newObjType == file_type: obj.setFile(fdata) factory = getToolByName(parent, 'portal_factory') catalog = getToolByName(parent, 'portal_catalog') obj = factory.doCreate(obj, filename) obj.setFormat(mimetype) catalog.reindexObject(obj, catalog.indexes()) return obj def _getFileObjectType(self, major, mimetype): """ """ props = getToolByName(getSite(), 'portal_properties') image_type = props.zipfile_properties.image_type file_type = props.zipfile_properties.file_type doc_type = 
props.zipfile_properties.doc_type #folder_type = props.zipfile_properties.folder_type if 'image' == major: type = image_type elif mimetype in ['text/html', 'text/plain', 'text/structured', 'text/x-rst']: type = doc_type else: type = file_type return type def getTime(self, id): """ Returns the gmtime appended to the an id, used to obtain a unique id for the logFile object """ import time uid = id for tp in time.gmtime(): uid += str(tp) return uid def exportContent(self, context, obj_paths=None, filename=None): """ Export content to a zip file. context - Container refers to the container of all the objects that are to be exported. obj_paths - Refers to a list of paths of either objects or contexts that will be included in the zip file. filename - Refers to the fullpath filename of the exported zip file. """ objects_list = self._createObjectList(context, obj_paths) zip_path = self._getAllObjectsData(context, objects_list) return zip_path def exportContentInTempFile(self, context, obj_paths=None, filename=None): """ Export content to a zip file. """ objects_list = self._createObjectList(context, obj_paths) tfile = TemporaryFile() self._getAllObjectsData(context, objects_list, tfile) size = tfile.tell() tfile.seek(0) return tfile, size def _createObjectList(self, context, obj_paths=None, state=None): """ Create a list of objects by iteratively descending a folder tree... or trees (if obj_paths is set). """ objects_list = [] if obj_paths: portal = getToolByName(context, 'portal_url').getPortalObject() wt = getToolByName(portal, 'portal_workflow') for path in obj_paths: obj = portal.restrictedTraverse(path) # if this is a folder, then add everything in this folder to # the obj_paths list otherwise simply add the object. 
if obj.isPrincipiaFolderish: self._appendItemsToList(folder=obj, list=objects_list, state=state) elif obj not in objects_list: if state: if wt.getInfoFor(obj, 'review_state') in state: objects_list.append(obj) else: objects_list.append(obj) else: #create a list of the objects that are contained by the context self._appendItemsToList(folder=context, list=objects_list, state=state) return objects_list def generateSafeFileName(self, file_name): """ Remove illegal characters from the exported filename. """ file_name = unquote(file_name) return file_name def _getAllObjectsData(self, context, objects_listing): """ Returns the data in all files with a content object to be placed in a zipfile """ import tempfile # Use temporary IO object instead of writing to filesystem. fd, path = tempfile.mkstemp('.zipfiletransport') close(fd) zipFile = ZipFile(path, 'w', ZIP_DEFLATED) context_path = str(context.virtual_url_path()) for obj in objects_listing: object_extension = '' object_path = str(obj.virtual_url_path()) is_file = self._objImplementsInterface(obj, interfaces.IATFile) is_image = self._objImplementsInterface(obj, interfaces.IATImage) if (is_file or is_image): file_data = str(obj.data) object_path = object_path.replace(context_path + '/', '') # Add an extension if we do not have one already if hasattr(obj, 'getContentType'): mime = obj.getContentType() if "image/jpeg" in mime: object_extension = '.jpg' elif "image/png" in mime: object_extension = '.png' elif "image/gif" in mime: object_extension = '.gif' if object_extension in obj.getId(): object_extension = '' elif self._objImplementsInterface(obj, interfaces.IATDocument): if "text/html" == obj.Format(): file_data = obj.getText() object_extension = ".html" elif "text/x-rst" == obj.Format(): file_data = obj.getRawText() object_extension = ".rst" elif "text/structured" == obj.Format(): file_data = obj.getRawText() object_extension = ".stx" elif "text/plain" == obj.Format(): file_data = obj.getRawText() object_extension = 
".txt" else: file_data = obj.getRawText() object_path = object_path.replace(context_path + '/', '') elif self._objImplementsInterface(obj, interfaces.IATFolder): if hasattr(obj, 'getRawText'): file_data = obj.getRawText() if object_path == context_path: object_path = object_path.split("/")[-1] else: object_path = object_path.replace(context_path + '/', '') if (object_path[-4:] != ".htm"): object_extension = ".html" else: continue # start point for object path, adding 1 removes the initial '/' object_path = self.generateSafeFileName(object_path) if object_path: # reconstruct path with filename, restores non-ascii # characters in filenames filename_path = [] for i in range(0, len(object_path.split('/'))): filename_path.append(obj.getId()) obj = obj.aq_inner.aq_parent if len(filename_path) > 1: filename_path.reverse() filename_path = '/'.join(filename_path) else: filename_path = filename_path[0] # Add the correct file extension if filename_path[-len(object_extension):] != object_extension: filename_path += object_extension if 'Windows' in context.REQUEST['HTTP_USER_AGENT']: filename_path = filename_path.decode('utf-8') filename_path = filename_path.encode('cp437') zipFile.writestr(filename_path, file_data) zipFile.close() return path def _objImplementsInterface(self, obj, interfaceClass): """ Return boolean indicating if obj implements the given interface. 
""" if shasattr(interfaceClass, 'providedBy') and \ interfaceClass.providedBy(obj): return True if not shasattr(obj, '__implements__'): return False if interfaceClass in self._tupleTreeToList(obj.__implements__): return True def _tupleTreeToList(self, t, lsa=None): """Convert an instance, or tree of tuples, into list.""" import types if lsa is None: lsa = [] if isinstance(t, types.TupleType): for o in t: self._tupleTreeToList(o, lsa) else: lsa.append(t) return lsa def _appendItemsToList(self, folder, list, state): """ """ ct = getToolByName(folder, 'portal_catalog') wt = getToolByName(folder, 'portal_workflow') path = '/'.join(folder.getPhysicalPath()) brains = ct.searchResults(path={'query': path}) for brain_object in brains: obj = brain_object.getObject() if not (obj in list or obj.isPrincipiaFolderish): if state: if wt.getInfoFor(obj, 'review_state') in state: list.append(obj) else: list.append(obj) return list def _convertToUnicode(self, bytestring): """ Convert bytestring into unicode object """ # *nix encoding try: unicode_text = unicode(bytestring, 'utf-8') # WinZip encoding except UnicodeDecodeError: unicode_text = unicode(bytestring, 'cp437') return unicode_text # # Utility functions for use by outside tools. # # def getZipFilenames(self, zfile): """ Gets a list of filenames in the Zip archive.""" try: f = ZipFile(zfile) except: #XXX: This is ugly f = None return f and f.namelist() or [] def getZipFileInfo(self, zfile): """ Gets info about the files in a Zip archive. """ mt = self.mimetypes_registry f = ZipFile(zfile) fileinfo = [] for x in f.infolist(): fileinfo.append((x.filename, mt.lookupExtension(x.filename).normalized(), x.file_size)) return fileinfo def getZipFile(self, zfile, filename): """ Gets a file from the Zip archive. 
""" mt = self.mimetypes_registry f = ZipFile(zfile) finfo = f.getinfo(filename) fn = split(finfo.filename)[1] # Get the file name path = fn.replace('\\', '/') fp = path.split('/') # Split the file path into a list if '' == fn: return 'dir', fn, fp, None, None, 0, None ftype = mt.lookupExtension(finfo.filename) if not ftype: major = 'application' mimetype = 'application/octet-stream' else: major = ftype.major() mimetype = ftype.normalized() fdata = f.read(filename) return 'file', fn, fp, major, mimetype, finfo.file_size, fdata def get_zipfile_name(self): return 'Test.zip' zipfiletransport_utility = ZipFileTransportUtility()
UTF-8
Python
false
false
2,013
9,440,338,143,324
cb05c2490be60fcbc72afce15389303ae36f68d2
5f0f0865b7e4e2aa1867a88c138df56936c0b23b
/blocks/tests/test_pylearn2.py
6317698fe0dd1326830e4961c5d482d72d3399e1
[ "MIT" ]
permissive
jych/blocks
https://github.com/jych/blocks
2c709dcf042f4259981adcb54d9e3a48dac0c87f
995cb7b67545b272877ecf9e90285cc71c9e6091
refs/heads/master
2021-01-09T06:51:34.967301
2014-11-27T04:12:40
2014-11-27T04:12:40
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Smoke test for driving a Blocks MLP through Pylearn2's training loop."""
import logging

import numpy
import pylearn2
from pylearn2.space import VectorSpace
from pylearn2.testing.datasets import random_dense_design_matrix
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD

from blocks.bricks import Sigmoid, MLP
from blocks.cost import SquaredError
from blocks.initialization import IsotropicGaussian, Constant
from blocks.pylearn2 import BlocksModel, BlocksCost


def test_pylearn2_trainin():
    """Run a few seconds of Pylearn2 SGD over a Blocks-built MLP."""
    # A 784-100-784 sigmoid network, wrapped so Pylearn2 can treat it as
    # one of its own models with a squared-error cost.
    mlp = MLP(activations=[Sigmoid(), Sigmoid()],
              dims=[784, 100, 784],
              weights_init=IsotropicGaussian(),
              biases_init=Constant(0.01))
    mlp.initialize()
    block_cost = BlocksCost(SquaredError())
    block_model = BlocksModel(mlp, (VectorSpace(dim=784), 'features'))

    # Synthetic train/validation design matrices; fixed seed keeps the
    # test deterministic.
    rng = numpy.random.RandomState(14)
    train_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
    valid_dataset = random_dense_design_matrix(rng, 1024, 784, 10)

    # Pylearn2 logs verbosely by default; only let errors through.
    logging.getLogger(pylearn2.__name__).setLevel(logging.ERROR)

    # Train with SGD, capped at three seconds of wall-clock time.
    sgd = SGD(learning_rate=0.01, cost=block_cost, batch_size=128,
              monitoring_dataset=valid_dataset)
    Train(train_dataset, block_model, algorithm=sgd).main_loop(time_budget=3)
UTF-8
Python
false
false
2,014
12,214,887,034,271
cd97700066da3c4491237fbb39925576fe9b692e
327380150aa40a43082b0da4dc6f906b2f2b8436
/Debug/logparser.py
634fc872462571021585a90462415db0c066a0db
[]
no_license
fuji246/band2
https://github.com/fuji246/band2
0a071474e682ee0e667033c62b86450fa6ba2af1
d628559470f18e4c0f557fa188eacbb850963481
refs/heads/master
2020-05-20T16:35:12.846409
2011-05-24T10:01:09
2011-05-24T10:01:09
32,116,914
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Parse a bandwidth-probe debug log and plot per-conversation metrics."""
import re
import os
import sys
import matplotlib.pyplot as plt
from statlib import stats
from trend import show_trend
from sim import sim_pearson
from kalman import kalman_filter

# Path helper relative to this script's directory.
here = lambda x: os.path.join(os.path.abspath(os.path.dirname(__file__)), x)
# Interactive breakpoint helper (Python 2 raw_input).
pause = lambda: raw_input('\nPress Enter to continue')
# Map the numeric protocol code from the log to a readable name.
PROTO_STR = lambda x: "TCP" if x==1 else ("UDP" if x== 2 else "PSEUDOTCP")

# Marker line written at the start of each logging run; only lines after
# the LAST occurrence of this marker are analysed.
RUN_START_MARK = "----------begin new run-----------"
# One metrics sample per matching line: proto, conv_id, now, send_ts,
# rtt, rel_owd (both may be negative), recv_rate, pkt_loss.
metrics_re = re.compile(r'DEBUG - \[(\d+)\] conv_id = (\d+) now = (\d+) ms, send_ts = (\d+) ms, rtt = (\-?\d+) ms, rel_owd = (\-?\d+) ms recv_rate = (\d+)Bytes/sec pkt_loss = (\d+)%')
ratelimit_re = re.compile(r'SetRateLimit, conv_id = (\d+), ratelimit = (\d+), proto = (\d+)')
TICK_RESOLUTION = 100 #ms


class Metrics(object):
    """One metrics sample (a single matching log line) for a conversation."""
    def __init__(self):
        self.proto = -1      # protocol name string once parsed, -1 before
        self.conv_id = 0     # conversation id
        self.tick = 0        # send timestamp in ms (send_ts field)
        self.rtt = 0         # round-trip time, ms
        self.owd = 0         # relative one-way delay, ms (may be negative)
        self.loss = 0        # packet loss percentage
        self.recv_rate = 0   # receive rate, bytes/sec
        self.seq = 0         # bucket index derived from tick (set later)
    def __str__(self):
        return '[%d] - [%s] seq = %d, tick = %d, rtt = %d, owd = %d, loss = %d, recv_rate = %d'%(self.conv_id, self.proto, self.seq, self.tick, self.rtt, self.owd, self.loss, self.recv_rate)


def GetRunStartPos(f):
    """Return the file offset just after the LAST run-start marker in f.

    Scans the whole file line by line, remembering the position after each
    marker, then rewinds f to the beginning.  Asserts if no marker exists.
    """
    #f = open(filepath,'r')
    mark_pos = -1
    #DO NOT use readlines() here
    while True:
        l = f.readline()
        if l == '':
            break
        #print f.tell()
        if l.find(RUN_START_MARK) >= 0:
            mark_pos = f.tell()
            #print mark_pos, l
            #pause()
    #f.close()
    f.seek(0)
    assert mark_pos >= 0
    return mark_pos


def GetMetricsOfConvs(filepath):
    """Parse the latest run of the log, bucket samples by conversation and
    time slot, smooth OWD with a Kalman filter, print pairwise Pearson
    similarity between conversations and plot the per-conversation curves.
    """
    f = open(filepath,'r')
    # Skip straight to the most recent run.
    f.seek(GetRunStartPos(f))
    min_tick = sys.maxint
    max_tick = 0
    metrics_map = {}
    attr_map = {} #{conv_id:[ratelimit,proto]}
    for l in f.readlines():
        metric = None
        result = metrics_re.search(l)
        if result is not None:
            metric = Metrics()
            metric.proto = PROTO_STR(int(result.groups()[0]))
            metric.conv_id = int(result.groups()[1])
            # NOTE(review): tick is taken from send_ts (group 3), not
            # "now" (group 2) — presumably intentional; confirm.
            metric.tick = int(result.groups()[3])
            metric.rtt = int(result.groups()[4])
            metric.owd = int(result.groups()[5])
            metric.recv_rate = int(result.groups()[6])
            metric.loss = int(result.groups()[7])
            print metric
            min_tick = min(metric.tick, min_tick)
            max_tick = max(metric.tick, max_tick)
            records = metrics_map.setdefault(metric.conv_id, [])
            records.append(metric)
        else:
            # Not a metrics line; it may carry the conversation attributes.
            result = ratelimit_re.search(l)
            if result is not None:
                attr_map[int(result.groups()[0])] = [int(result.groups()[1]), PROTO_STR(int(result.groups()[2]))]
                #print l
                #pause()
    f.close()
    print attr_map
    # Quantize a tick into a sequence slot of TICK_RESOLUTION ms,
    # relative to the earliest tick seen (integer division).
    getseq = lambda tick, min_tick, resol: tick/resol-min_tick/resol
    pref_metrics = {}#{conv_id:{seq:[metrics],...},...}
    for k,v in metrics_map.iteritems():
        print '--------------------conv_id [%d]'%k
        seq_records = pref_metrics.setdefault(k, {})
        for metric_item in v:
            metric_item.seq = getseq(metric_item.tick, min_tick, TICK_RESOLUTION)
            print metric_item
            metrics_list = seq_records.setdefault(metric_item.seq, [])
            metrics_list.append(metric_item)
    print '\nretreive rtt and plot graph, calc sim'
    #retreive rtt and plot graph, calc sim
    val_lists = []
    '''
    lst = [(seq0,seq1,...),(val0,val1,...)]
    lists = [(id1,lst1), (id2,lst2), ...]
    '''
    pref_val = {}#{conv_id:{{seq:val},...},...}
    for k,v in pref_metrics.iteritems():#{conv_id:{seq:[metrics],...},...}
        pref_val[k] = {}
        seq_lst = []
        val_lst = []
        #print '----------->'
        #not sorted by seq, need sort
        sortedlist = sorted(v.iteritems(),key=lambda x: x[0])
        proto = 0
        for seq, metrics in sortedlist:
            #print seq, metrics
            seq_lst.append(seq)
            # Collapse all samples in one slot to their middle OWD value.
            mid_val = GetMidValue([item.owd for item in metrics])
            val_lst.append(mid_val)
            pref_val[k][seq] = mid_val
        #min_value = min(val_lst)
        #val_lst = [i-min_value for i in val_lst]
        # Smooth the OWD series before correlating/plotting.
        val_lst = kalman_filter(val_lst)
        print 'spearman :', stats.lspearmanr(range(len(val_lst)), val_lst)
        #print 'add to list: %s'%([[k,attr_map[k]], [seq_lst, val_lst]])
        val_lists.append([[k,attr_map[k]], [seq_lst, val_lst]])
    '''
    lists = []
    print '--------------------'
    for k,v in metrics_map.iteritems():
        print 'conv_id [%d]'%k
        lst = []
        for i in v:
            lst.append(i.rtt)
            print i
        lists.append(lst)
    ShowLinesGraph(lists, 'trend', 'time', 'value')
    '''
    # Pairwise Pearson similarity between every pair of conversations.
    conv_ids = [ id for id in pref_metrics ]
    cnt = len(conv_ids)
    for i in range(cnt):
        for j in range(i+1, cnt):
            sim_val = sim_pearson(pref_val, conv_ids[i], conv_ids[j])
            print 'sim of [%d - %d]: %f\n'%(conv_ids[i], conv_ids[j], sim_val)
    pause()
    ShowLinesGraph(val_lists, 'trend', 'time', 'value')


def GetMidValue(d):
    """Middle element of d, or the mean of the two middle elements when
    len(d) is even.  NOTE(review): assumes d is non-empty and appears to
    expect sorted input for a true median — confirm with callers.
    """
    mid_val = 0
    mid = len(d)/2
    if len(d)%2 == 0:
        mid_val = (d[mid-1]+d[mid])/2
    else:
        mid_val = d[mid]
    return mid_val


def GetColors(cnt):
    """Return cnt distinct matplotlib single-letter color codes (max 7)."""
    #TODO: add more colors in RGB
    color_list = ['b','g','r','y','m','c','k']
    assert cnt <= len(color_list)
    return color_list[:cnt]


def FormatSpeed(i):
    """Format a bytes/sec value with a binary unit suffix, e.g. '1.50 MB/s'.

    NOTE(review): values >= 1024 GB/s would index past RATE_FORMAT_LIST.
    """
    RATE_FORMAT_LIST = ['B/s', 'KB/s', 'MB/s', 'GB/s']
    n = 0
    tmp = i
    while tmp >= 1024:
        n += 1
        tmp /= 1024.0
    return '%.2f %s'%(tmp, RATE_FORMAT_LIST[n])


def ShowLinesGraph(lists, title, xlabel, ylabel):
    '''
    Plot one line per conversation, with an in-axes legend line per entry.
    lst = [(x0,x1,...),(y0,y1,...)]
    lists = [(id1,lst1), (id2,lst2), ...]
    '''
    if len(lists) == 0:
        print "No feedbacks found! Check the connectivity to server"
        return
    for i in lists:
        print '\n\nconv_id: %d, rate:%d proto:%s'%(i[0][0],i[0][1][0], i[0][1][1])
        print '\nseq:', i[1][0]
        print '\nval:', i[1][1]
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    cnt = len(lists)
    colors = GetColors(cnt)
    for i in range(0, cnt):
        txt = 'conv_id: %d, rate: %s, [%s]'%(lists[i][0][0],FormatSpeed(lists[i][0][1][0]),lists[i][0][1][1])
        plt.text(0.1, 0.6+0.1*i, txt, color=colors[i],transform=plt.gca().transAxes)
        plt.plot(lists[i][1][0],lists[i][1][1], colors[i], linestyle='-',marker='o')
    # Pad the axes slightly past the largest x/y values.
    xmax = max([max(lst[1][0]) for lst in lists])+2
    ymax = max([max(lst[1][1]) for lst in lists])+2
    plt.axis([0, xmax, 0, ymax])
    plt.show()


def GetRelOwd(filepath):
    """Extract every rel_owd sample from the log.

    Returns (owd_list, owd_max, owd_min); values may be negative.
    """
    restring = re.compile(r'rel_owd = (\-?\d+)')#may be minus value
    f = open(filepath,'r')
    owd_list = []
    owd_max = -sys.maxint-1
    owd_min = sys.maxint
    for l in f.readlines():
        #print l
        result = restring.search(l)
        if result is not None:
            owd = int(result.groups()[0])
            #print owd, owd_max, owd_min
            owd_list.append(owd)
            if owd > owd_max:
                owd_max = owd
            if owd_min > owd:
                owd_min = owd
            #print owd, owd_max, owd_min
            #pause()
    #print owd_list
    f.close()
    return owd_list, owd_max, owd_min


def CalcOwd(filepath):
    """Print inter-packet send/receive deltas and their difference
    (receive jitter relative to send spacing) from ts_send/ts_recv pairs.
    """
    restring = re.compile(r'ts_send = (\d+) ts_recv = (\d+)')
    f = open(filepath,'r')
    ts_send = []
    ts_recv = []
    for l in f.readlines():
        result = restring.search(l)
        if result is not None:
            ts_send.append(int(result.groups()[0]))
            ts_recv.append(int(result.groups()[1]))
    f.close()
    print ts_send
    print ts_recv
    delta_ts_send = []
    for i in range(len(ts_send)-1):
        delta_ts_send.append(ts_send[i+1] - ts_send[i])
    delta_ts_recv = []
    for i in range(len(ts_recv)-1):
        delta_ts_recv.append(ts_recv[i+1] - ts_recv[i])
    print delta_ts_send
    print delta_ts_recv
    for i in range(len(delta_ts_send)):
        print delta_ts_recv[i] - delta_ts_send[i]


def GetRTT(filepath):
    """Extract every rtt sample. Returns (rtt_list, rtt_max, rtt_min)."""
    restring = re.compile(r'rtt = (\d+)')
    f = open(filepath,'r')
    rtt_list = []
    rtt_max = -sys.maxint-1
    rtt_min = sys.maxint
    for l in f.readlines():
        #print l
        result = restring.search(l)
        if result is not None:
            rtt = int(result.groups()[0])
            rtt_list.append(rtt)
            if rtt > rtt_max:
                rtt_max = rtt
            if rtt_min > rtt:
                rtt_min = rtt
    #print rtt_list
    f.close()
    return rtt_list, rtt_max, rtt_min


def GetRecvRate(filepath):
    """Extract every recv_rate sample from the log as a list of ints."""
    restring = re.compile(r'recv_rate = (\d+)')
    f = open(filepath,'r')
    recvrate_list = []
    for l in f.readlines():
        #print l
        result = restring.search(l)
        if result is not None:
            recvrate = int(result.groups()[0])
            recvrate_list.append(recvrate)
    f.close()
    return recvrate_list


def GetPktLoss(filepath):
    """Count gaps in the server-side sequence numbers.

    Returns (loss percentage, lost packet count, total expected packets).
    Assumes monotonically increasing seq (no duplicates/reordering);
    NOTE(review): raises ZeroDivisionError if no gap ever advances
    seq_expect past start_seq — confirm acceptable for the logs used.
    """
    #TODO handle with seq loop
    restring = re.compile(r'\[server\] - .* seq = (\d+)')
    start_seq = seq_expect = None
    f = open(filepath,'r')
    loss = 0
    for l in f.readlines():
        #print l
        result = restring.search(l)
        if result is not None:
            seq = int(result.groups()[0])
            if seq_expect is None:
                seq_expect = start_seq = seq
            if seq > seq_expect:
                #print 'loss, ',seq - seq_expect
                loss += (seq - seq_expect)
                print 'loss %d packets, seq = %d, seq_expect = %d'%(seq - seq_expect, seq ,seq_expect)
                seq_expect = seq + 1
            elif seq == seq_expect:
                seq_expect = seq + 1
            elif seq < seq_expect:
                print 'error, no dup and out-of-order packets assumed! seq = %d, seq_expect = %d'%(seq ,seq_expect)
    #print seq_expect
    f.close()
    return (loss*100.0/(seq_expect-start_seq), loss, seq_expect-start_seq)


def ShowGraph(d, title, xlabel, ylabel):
    """Plot a single series d against its 1-based index."""
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.plot(range(1,len(d)+1),d, 'black', linestyle='-',marker='o')
    plt.axis([0, len(d)+2, 0, max(d)+0.2*max(d)])
    plt.show()


def ShowTwoLineGraph(d1, d2, title, xlabel, ylabel):
    """Plot two series on shared axes: d1 in black circles, d2 in red
    triangles."""
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.plot(range(1,len(d1)+1),d1, 'black', linestyle='-',marker='o')
    plt.plot(range(1,len(d2)+1),d2, 'r', linestyle='-',marker='^')
    xmax = max(len(d2)+2, len(d1)+2)
    ymax = max(max(d2)+2, max(d1)+2)
    plt.axis([0, xmax, 0, ymax])
    plt.show()


def PlotRelOWD(filepath):
    """Normalize rel_owd to its minimum, plot it (whole series, then
    500-sample windows with trend), and dump the values to rel_owd.txt."""
    result = GetRelOwd(filepath)
    owd_list = result[0]
    owd_max = result[1]
    owd_min = result[2]
    print 'owd_max = ',owd_max - owd_min
    print 'owd_min = ',owd_min - owd_min
    # Shift so the smallest observed OWD becomes zero.
    owd_list = [ i - owd_min for i in owd_list ]
    ShowGraph(owd_list, 'OWD Graph', 'time', 'owd')
    show_trend(owd_list)
    n = 0
    total = len(owd_list)
    while n < total:
        l = min(500, total-n)
        #print owd_list[n:n+l]
        ShowGraph(owd_list[n:n+l], 'OWD Graph', 'time', 'owd')
        show_trend(owd_list[n:n+l])
        n += l
    f = open('rel_owd.txt','w+')
    for i in owd_list:
        f.write('%s\n'%i)
    f.close()


def PlotRTT(filepath):
    """Plot the RTT series in windows of 500 samples."""
    result = GetRTT(filepath)
    rtt_list = result[0]
    rtt_max = result[1]
    rtt_min = result[2]
    print 'rtt_max = ',rtt_max
    print 'rtt_min = ',rtt_min
    #ShowGraph(rtt_list, 'RTT Graph', 'time', 'rtt')
    n = 0
    total = len(rtt_list)
    while n < total:
        l = min(500, total-n)
        #print rtt_list[n:n+l]
        ShowGraph(rtt_list[n:n+l], 'RTT Graph', 'time', 'rtt')
        n += l
    #owd_list = [ str(int(i)-int(owd_list[0])) for i in owd_list]
    #f = open('rel_owd.txt','w+')
    #for i in owd_list:
    #    f.write('%s\n'%i)
    #f.close()


def PlotRttOwd(filepath):
    """Overlay RTT and normalized rel_owd in windows of 100 samples.

    Asserts both series have equal length (they come from the same lines).
    """
    result = GetRTT(filepath)
    rtt_list = result[0]
    rtt_max = result[1]
    rtt_min = result[2]
    print 'rtt_max = ',rtt_max
    print 'rtt_min = ',rtt_min
    result = GetRelOwd(filepath)
    owd_list = result[0]
    owd_max = result[1]
    owd_min = result[2]
    print 'owd_max = ',owd_max - owd_min
    print 'owd_min = ',owd_min - owd_min
    owd_list = [ i - owd_min for i in owd_list ]
    assert len(owd_list) == len(rtt_list)
    #ShowTwoLineGraph(rtt_list,owd_list, '[b]RTT-[r]OWD Graph', 'time', 'value')
    #return
    n = 0
    total = len(owd_list)
    while n < total:
        l = min(100, total-n)
        #print owd_list[n:n+l]
        #pause()
        ShowTwoLineGraph(rtt_list[n:n+l],owd_list[n:n+l], '[b]RTT-[r]OWD Graph', 'time', 'value')
        n += l


def PlotRecvRate(filepath):
    """Plot the receive-rate series in windows of 500 samples."""
    recvrate_list = GetRecvRate(filepath)
    #ShowGraph(recvrate_list, 'RecvRate Graph', 'time', 'recvrate')
    n = 0
    total = len(recvrate_list)
    while n < total:
        l = min(500, total-n)
        #print owd_list[n:n+l]
        ShowGraph(recvrate_list[n:n+l], 'RecvRate Graph', 'time', 'recvrate')
        n += l


if __name__ == '__main__':
    # Default log file; an optional single argument overrides it.
    filepath = 'netdect.log'
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
    #PlotRelOWD(filepath)
    #PlotRecvRate(filepath)
    #pkt_loss = GetPktLoss(filepath)
    #print 'pkt_loss = %.2f%%[%d/%d]'%pkt_loss
    #PlotRelOWD(filepath)
    #PlotRTT(filepath)
    #PlotRttOwd(filepath)
    #CalcOwd(filepath)
    GetMetricsOfConvs(filepath)
UTF-8
Python
false
false
2,011
1,786,706,443,631
98123d970256ba3dd48756252a96106fabcef377
fa048daacd361e431dd61954c4e8313b464aee29
/src/Declaraciones.py
7242232a2ab448010fbe477726dadfc6e3d7859b
[]
no_license
drmelectronic/econadmin1v0
https://github.com/drmelectronic/econadmin1v0
a946b3bd766a7126a50dd436aa010679445bd690
989eef8d4dfd5d893a481557b627511ef69d1abe
refs/heads/master
2016-09-05T12:05:51.345894
2014-01-07T18:55:24
2014-01-07T18:55:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/python
# -*- coding: utf-8 -*-
import gtk
import Contactos
import Estilos
import gobject

__author__ = "danielypamela"
__date__ = "$26/10/2011 05:53:37 PM$"


class Ventana(gtk.Window):
    """Top-level window for the 'Declaraciones' screen."""

    def __init__(self, consulta):
        """Create the window.

        consulta: object exposing a ``cursor`` attribute (database
        access); only the cursor is kept on the instance.
        """
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        # Keep the database cursor supplied by the caller.
        self.cursor = consulta.cursor
        self.set_title("Declaraciones")
        # Root vertical container (non-homogeneous, zero spacing).
        contenedor = gtk.VBox(False, 0)
        self.add(contenedor)


if __name__ == "__main__":
    print("Hello World")
UTF-8
Python
false
false
2,014
11,776,800,353,826
602c76d3a0b6038225c07cc58fd63ae3014935a8
9033a9ed26e5f535d4b70d05a6729a49668d4d7d
/generate_secret.py
debf577236ef532a6a57d0a71f7901679a5c3377
[]
no_license
nigma/sentry-on-dotcloud
https://github.com/nigma/sentry-on-dotcloud
4c1af57cbe3db8719418a3e54ef0ed75e8c103e6
4523f2cc72b987eb0e4b2bd35f4cab5b04ac8b93
refs/heads/master
2020-04-09T07:43:28.948577
2012-05-13T00:48:17
2012-05-13T00:48:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""Print a 50-character random secret key (e.g. for a Django SECRET_KEY)."""
import random

# Alphabet for the key; kept identical to the original script.
chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"

# Secrets must come from a CSPRNG.  random.SystemRandom draws from
# os.urandom(); the module-level Mersenne-Twister functions the original
# used are not suitable for security-sensitive values.
_rng = random.SystemRandom()


def generate_secret(length=50, alphabet=chars):
    """Return a cryptographically random secret string.

    length:   number of characters to generate (default 50).
    alphabet: characters to draw from (default: the module's ``chars``).
    """
    return "".join(_rng.choice(alphabet) for _ in range(length))


# Same observable behavior as before: print one 50-char key to stdout.
# (print(x) with a single argument is valid in both Python 2 and 3.)
print(generate_secret())
UTF-8
Python
false
false
2,012
13,099,650,259,080
3a3456fabf7e5ae263c466c1c7df4fe8efb3d0bd
fd05db3342ddd37012b2e75a6f3db64e2b6c47a8
/principal/comunicados.py
8da1d3d00c81543392b4c1ad88b41998ec6f3729
[]
no_license
arcr/Sisgesa-refactory
https://github.com/arcr/Sisgesa-refactory
4536aa5e393dec4b470e9ccbc856b9bbd3d4df02
c306580dcba3045963b98332dfc651722a8ae3ab
refs/heads/master
2015-08-07T23:11:07.754898
2013-05-22T00:46:21
2013-05-22T00:46:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Django views for teacher announcements (comunicados) and the student
view that lists them.  All views require login and redirect to '/' when
the user lacks the required group."""
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.core import serializers
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson as json
from principal import forms, models, funciones
from principal.funciones import user_in_group
from principal.models import *
import datetime


@login_required(login_url="/")
def comunicados(request):
    """Render the announcement-registration form for users in the
    'Profesor' group; everyone else is redirected to '/'."""
    user = request.user
    if user_in_group(user,"Profesor"):
        return render_to_response('comunicados/form_registrar_comunicado.html', context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect('/')


@login_required(login_url="/")
def ajax_reg_comunicado(request):
    """AJAX endpoint: create a Comunicado for the grade/section posted and
    link it (via Comunica) to the teacher's Ensenia record.

    Responds with the boolean success flag rendered into the body.
    Non-AJAX requests get 404; non-teachers are redirected to '/'.
    """
    user = request.user
    if user_in_group(user,"Profesor"):
        if request.is_ajax():
            grado = request.POST['grado']
            seccion = request.POST['seccion']
            titulo = request.POST['titulo']
            descripcion = request.POST['descripcion']
            try:
                # Find the teaching assignment that matches the posted
                # section/grade and the logged-in teacher.
                ensenia = Ensenia.objects.get(seccion__id= seccion, cursogrado__id=grado, profesor__usuario__id=user.id)
                crea_comunicado=Comunicado(titulo=titulo, descripcion=descripcion)
                crea_comunicado.save()
                crea_comunica=Comunica(comunicado_id=crea_comunicado.id, ensenia_id=ensenia.id)
                crea_comunica.save()
                dato=True
            # NOTE(review): bare except hides every failure mode (missing
            # Ensenia, DB errors, programming errors) behind dato=False —
            # consider catching Ensenia.DoesNotExist explicitly.
            except:
                dato=False
            return HttpResponse(dato)
        else:
            raise Http404
    else:
        return HttpResponseRedirect('/')


@login_required(login_url="/")
def ajax_grados_profesor(request):
    """AJAX endpoint: return JSON with the grades the logged-in teacher
    teaches (each grade mapped to an empty object)."""
    user = request.user
    if user_in_group(user,"Profesor"):
        ctx = {}
        grados = Ensenia.objects.filter(profesor__usuario__id=user.id)
        for grado in grados:
            ctx[grado.cursogrado.grado] = {}
        # NOTE(review): serializers.serialize normally expects a queryset,
        # not a plain dict — verify this actually produces valid JSON here;
        # json.dumps may have been intended.
        data = serializers.serialize('json',ctx)
        return HttpResponse(data , mimetype='application/json')
    else:
        return HttpResponseRedirect('/')


@login_required(login_url="/")
def ajax_secciones_profesor(request):
    """AJAX endpoint: return JSON with the sections the logged-in teacher
    teaches for the grade given in the 'grado' GET parameter."""
    user = request.user
    if user_in_group(user,"Profesor"):
        grado = request.GET['grado']
        ctx = {}
        secciones = Ensenia.objects.filter(profesor__usuario__id=user.id, cursogrado__grado__id=grado)
        for seccion in secciones:
            ctx[seccion.seccion] = {}
        # NOTE(review): same serializer-on-dict concern as in
        # ajax_grados_profesor above.
        data = serializers.serialize('json',ctx)
        return HttpResponse(data , mimetype='application/json')
    else:
        return HttpResponseRedirect('/')


# View announcements (students)
@login_required(login_url="/")
def ver_comunicados(request):
    """List the announcements addressed to the logged-in student's
    section and grade (looked up through the student's Matricula)."""
    user = request.user
    if user_in_group(user,"Alumno"):
        alumno = request.user
        detalle_alumno = Matricula.objects.get(alumno__usuario__username=alumno.username)
        # Debug output left in by the original author.
        print detalle_alumno.seccion
        print detalle_alumno.grado
        comunicados_profesor= Comunica.objects.filter(ensenia__seccion__nombre=detalle_alumno.seccion.nombre , ensenia__cursogrado__grado__nombre=detalle_alumno.grado.nombre)
        print comunicados_profesor
        ctx = {'comunicados_profesor' : comunicados_profesor}
        return render_to_response('comunicados/alumno_ver_comunicados.html',ctx,context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect('/')
UTF-8
Python
false
false
2,013
10,557,029,652,265
37b373219f5771a97916044eff5827aed905b1eb
11882c1b5432dfeea7533478b273d7533b54401c
/camelot/core/threading.py
879300fc5397443af31ff40877001ddead31c037
[ "GPL-1.0-or-later", "GPL-2.0-only", "LicenseRef-scancode-warranty-disclaimer" ]
non_permissive
kurtraschke/camelot
https://github.com/kurtraschke/camelot
99fef6f16d7022724f9fff98d4d45ed66f8424a1
f9bfeb05194f328c5ddaa99d69fbc23e558436d3
refs/heads/master
2020-12-24T15:40:23.890056
2011-02-12T14:52:48
2011-02-12T14:52:48
1,231,948
4
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================

'''
Some helper functions and classes related to threading issues
'''

from PyQt4 import QtCore

def synchronized( original_function ):
    """Decorator for synchronized access to an object, the object should
    have an attribute _mutex which is of type QMutex

    The wrapped method acquires self._mutex for the duration of the call
    and releases it when the call returns or raises.
    """
    from functools import wraps

    @wraps( original_function )
    def wrapper(self, *args, **kwargs):
        locker = QtCore.QMutexLocker(self._mutex)
        try:
            return original_function(self, *args, **kwargs)
        finally:
            # Fix: the original called unlock() only after a normal return,
            # so an exception in the wrapped method left the mutex locked
            # until the locker object happened to be garbage collected.
            locker.unlock()
    return wrapper
UTF-8
Python
false
false
2,011
592,705,530,101
ab33821387e3f0e911eb896f8c8607dba3f822b5
8a7aec7253dd1a0c3560cb71d354c1bcb1102c66
/tools/prepare-bisect-perf-regression.py
fbecafd2af5eedf1dffbc579488abfb86b3c0704
[ "BSD-3-Clause", "LGPL-2.1-only", "MPL-1.1", "MIT", "LicenseRef-scancode-unknown-license-reference", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-unknown" ]
non_permissive
loopCM/chromium
https://github.com/loopCM/chromium
78e60fe2945f40d5ab3a77a6fd36c667ca323b0f
8db1d931e4e1609d7d8f021ecb4fd2db0b92cb87
HEAD
2019-07-18T09:18:52.643862
2013-05-21T00:44:40
2013-05-21T00:44:40
10,188,303
7
5
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prepare Performance Test Bisect Tool This script is used by a trybot to create a working directory and sync an initial copy of the depot for use in bisecting performance regressions. An example usage: ./tools/prepare-bisect-perf-regressions.py --working_directory "~/builds" --output_buildbot_annotations Would result in creating ~/builds/bisect and then populating it with a copy of the depot. """ import optparse import sys import bisect_utils def main(): usage = ('%prog [options] [-- chromium-options]\n' 'Prepares a temporary depot for use on a trybot.') parser = optparse.OptionParser(usage=usage) parser.add_option('-w', '--working_directory', type='str', help='Path to the working directory where the script will ' 'do an initial checkout of the chromium depot. The ' 'files will be placed in a subdirectory "bisect" under ' 'working_directory and that will be used to perform the ' 'bisection.') parser.add_option('--output_buildbot_annotations', action="store_true", help='Add extra annotation output for buildbot.') (opts, args) = parser.parse_args() if not opts.working_directory: print 'Error: missing required parameter: --working_directory' print parser.print_help() return 1 return bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, True) if __name__ == '__main__': sys.exit(main())
UTF-8
Python
false
false
2,013
10,153,302,690,640
b664214473b1224c4e9b1b550f9e2195ed18eb67
2d7e1925fb0509fd13042db31532937bf05af7ef
/praw/backport.py
8d7289b6bc61f17dd6f6068844f66af691372f3a
[ "GPL-3.0-only" ]
non_permissive
econner/praw
https://github.com/econner/praw
00f0277bd9c2e896dcd10c5976b4ee0c869651d6
896943bea4a70449a330310975f6ea51f5e083d7
refs/heads/master
2020-12-14T18:14:47.321044
2012-08-12T02:12:46
2012-08-12T02:26:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW.  If not, see <http://www.gnu.org/licenses/>.

from six import MovedAttribute, add_move

# (name, python 2 module, python 3 module) for every attribute PRAW uses
# that lives in a different module on each major Python version.
_MOVES = (
    ('HTTPError', 'urllib2', 'urllib.error'),
    ('HTTPCookieProcessor', 'urllib2', 'urllib.request'),
    ('Request', 'urllib2', 'urllib.request'),
    ('URLError', 'urllib2', 'urllib.error'),
    ('build_opener', 'urllib2', 'urllib.request'),
    ('quote', 'urllib2', 'urllib.parse'),
    ('urlencode', 'urllib', 'urllib.parse'),
    ('urljoin', 'urlparse', 'urllib.parse'),
)


def add_moves():
    """Register six 'moved attribute' shims for the names listed above,
    so they can be imported from six.moves on both Python versions."""
    for name, py2_module, py3_module in _MOVES:
        add_move(MovedAttribute(name, py2_module, py3_module))
UTF-8
Python
false
false
2,012
5,428,838,704,741
37038bc7dcb093ec904a3369ae12f01a28a23150
7bc298947424fb425e4145b94567682f462f0442
/broker/management/commands/create_bus_occurrences.py
1582d08808336ff0ff3355bedcf2407f6efe004b
[ "BSD-2-Clause" ]
permissive
ricardosasilva/fiscalcidadao-web
https://github.com/ricardosasilva/fiscalcidadao-web
aab1355ec9b2c4a0a9ac17794019cd0544845ea3
69286924d1f457c11dd022b9a409738668dee672
refs/heads/master
2021-01-19T16:51:43.075615
2013-10-27T16:20:04
2013-10-27T16:20:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2013, # Diogo Baeder, Francisco Ciliao, Gabriel Palacio, Ricardo Silva, Vitor George # All rights reserved. # # Redistribution and use in source and binary forms, with or without modificat- # ion, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#
from broker.models import Occurrence, Fact
from django.contrib.gis.geos.point import Point
from django.core.management.base import BaseCommand
from multigtfs.models.route import Route
from multigtfs.models.stop import Stop
from optparse import make_option
from random import choice
from utils import randomDate
import random


def random_stop_position():
    """Return a Point (lon, lat, WGS84) taken from a random GTFS stop."""
    stop = Stop.objects.filter().order_by('?')[0]
    point = Point(float(stop.lon), float(stop.lat), srid=4326)
    return point


def random_photo():
    """Return a random photo path, or None (no photo) half of the time."""
    photos = (
        'occurrence_photos/1.jpg',
        None,
        'occurrence_photos/2.jpg',
        'occurrence_photos/3.jpg',
        None,
        None)
    return choice(photos)


def random_comment():
    """Return a random comment string; most entries are empty."""
    comments = (
        u'Precisa melhorar',
        '',
        '',
        u'Não gostei',
        '',
        '',
        '',
        )
    return choice(comments)


def get_date():
    """Return a random datetime between Sep 2010 and Oct 2013."""
    return randomDate("2010-9-1 00:00+0300", "2013-10-26 23:50+0300", random.random())


def get_bus_fact():
    """Return one of the bus-related Facts (pks 1-8) at random."""
    fact = Fact.objects.filter(pk__in=(1,2,3,4,5,6,7,8)).order_by('?')[0]
    return fact


def get_route():
    """Return a random GTFS route."""
    routes = Route.objects.all().order_by('?')[0]
    return routes


class Command(BaseCommand):
    """Management command that seeds the database with random bus
    occurrences (date, fact, stop location, route, comment, photo)."""

    help = 'Creates a series of bus occurrences.'

    option_list = BaseCommand.option_list + (
        make_option('--quantity',
            default=10,
            dest='qtd',
            help='Quantity of occurrences'),
        )

    def handle(self, *args, **options):
        """Create exactly --quantity Occurrence rows."""
        quantity = int(options['qtd'])
        # Fix: the original looped over range(1, quantity), which created
        # quantity - 1 occurrences instead of the requested quantity.
        for i in range(quantity):
            occurrence = Occurrence()
            occurrence.date_time = get_date()
            occurrence.fact = get_bus_fact()
            occurrence.location = random_stop_position()
            occurrence.route = get_route()
            occurrence.comment = random_comment()
            occurrence.photo = random_photo()
            occurrence.save()
UTF-8
Python
false
false
2,013
10,866,267,259,100
de96b777908b042abc3c28130b2d02f5abe8623d
39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43
/poet/branches/Demo20100725/.svn/text-base/ReportFindings.py.svn-base
6897d86d3c9fd088aa0fff31ee89afc003d2dc1d
[]
no_license
AgileAdaptiveTools/POETTools
https://github.com/AgileAdaptiveTools/POETTools
85158f043e73b430c1d19a172b75e028a15c2018
60244865dd850a3e7346f9c6c3daf74ea1b02448
refs/heads/master
2021-01-18T14:46:08.025574
2013-01-28T19:18:11
2013-01-28T19:18:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
####################################################################################
#
# Copyright (c) 2003 Dr. Conan C. Albrecht
#
# This file is part of GroupMind.
#
# GroupMind is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GroupMind is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
####################################################################################

from BaseView import BaseView
from Constants import *
from Events import Event
import sys
import datagate
import xml.dom.minidom
import time
import Directory


class ReportFindings(BaseView):
    """POET "Report Findings" view: renders the findings/filter page, answers
    the client-side 'get_findings' / 'exportCSV' events with filtered question
    data, and writes XML/CSV export files.

    NOTE: this module is Python 2 (``print >> f``, backtick repr).
    """

    # Human-readable view name used by the framework's view registry.
    NAME = 'Report Findings'

    def __init__(self):
        BaseView.__init__(self)
        # Interactive views participate in the client event loop.
        self.interactive = True

    def send_content(self, request):
        # Sends content of page: <head> with chart/jQuery scripts, then the
        # findings UI (filter selects + chart div) into the <body>.
        request.writeln(HTML_HEAD_NO_CLOSE + '<link type="text/css" rel="stylesheet" href="' + join(WEB_PROGRAM_URL, "layout.css") + '" />')
        # NOTE(review): inside the drawChart() data below, rows 1-4 call
        # data.setValue(row, 2, ...) twice; the second call overwrites the
        # first, so one of the hard-coded sample values per row is dead.
        # There is also a stray apostrophe emitted after the jquery
        # <script> tag — kept verbatim here, flagged for a code fix.
        request.writeln('''
<script type="text/javascript" src="''' + join(WEB_PROGRAM_URL, "jsapi.js") + '''"></script>
<script type="text/javascript" src="''' + join(WEB_PROGRAM_URL, "corechart.js") + '''"></script>
<script type="text/javascript" src="''' + join(WEB_PROGRAM_URL, "default,corechart.I.js") + '''"></script>
<script src="''' + join(WEB_PROGRAM_URL, 'jquery-1.4.2.min.js') + '''"></script>'
<script src="''' + join(WEB_PROGRAM_URL, 'jquery-ui-1.8.2.custom.min.js') + '''"></script>
<link href="''' + join(WEB_PROGRAM_URL, 'jquery-ui-1.8.2.custom.css') + '''" rel="stylesheet" type="text/css"/>
<!--<script type="text/javascript" src="http://www.google.com/jsapi"></script>-->
<script type="text/javascript">
$(function() {
    $("input:button, input:submit").button();
});
var chart;
//google.setOnLoadCallback(drawChart);
//google.load("visualization", "1", {packages:["corechart"]});
function drawChart() {
    try {
        var data = new google.visualization.DataTable();
        data.addColumn('string', 'Category');
        data.addColumn('number', 'PM');
        data.addColumn('number', 'User');
        data.addColumn('number', 'PMO');
        data.addColumn('number', 'Contracter');
        data.addColumn('number', 'Sr Stakeholder');
        data.addRows(6);
        data.setValue(0, 0, 'Trust');
        data.setValue(0, 1, 1000);
        data.setValue(0, 2, 400);
        data.setValue(0, 3, 700);
        data.setValue(0, 4, 900);
        data.setValue(0, 5, 500);
        data.setValue(1, 0, 'Commitment');
        data.setValue(1, 1, 610);
        data.setValue(1, 2, 860);
        data.setValue(1, 2, 690);
        data.setValue(1, 3, 970);
        data.setValue(1, 4, 750);
        data.setValue(1, 5, 500);
        data.setValue(2, 0, 'SA');
        data.setValue(2, 1, 660);
        data.setValue(2, 2, 1120);
        data.setValue(2, 2, 400);
        data.setValue(2, 3, 700);
        data.setValue(2, 4, 900);
        data.setValue(2, 5, 500);
        data.setValue(3, 0, 'Agility');
        data.setValue(3, 1, 1030);
        data.setValue(3, 2, 540);
        data.setValue(3, 2, 400);
        data.setValue(3, 3, 700);
        data.setValue(3, 4, 900);
        data.setValue(3, 5, 500);
        data.setValue(4, 0, 'Teamwork');
        data.setValue(4, 1, 1030);
        data.setValue(4, 2, 540);
        data.setValue(4, 2, 400);
        data.setValue(4, 3, 700);
        data.setValue(4, 4, 900);
        data.setValue(4, 5, 500);
        data.setValue(5, 0, 'Complexity');
        data.setValue(5, 1, 1030);
        data.setValue(5, 2, 540);
        data.setValue(5, 2, 400);
        data.setValue(5, 3, 700);
        data.setValue(5, 4, 900);
        data.setValue(5, 5, 500);
        chart = new google.visualization.BarChart(document.getElementById('chartDiv'));
        chart.draw(data, {
            width: 900, height: 800,
            title: 'Survey ABC Results',
            vAxis: { title: 'Category', titleColor: '#000' },
            hAxis: { title: 'Level of Concern', titleColor: '#000' },
            isStacked:false
        });
    } catch(e){
        alert(e);
    }
}
</script>
</head>''')
        request.writeln('<body onLoad="getFindings();">')
        #request.writeln(HTML_BODY)
        # Client-side helpers: getFilter() collects the selected entries of the
        # three <select multiple> filters; getFindings()/exportCSV() push them
        # to the server via sendEvent.
        # NOTE(review): in the JS below, `user_filter == []` (and friends)
        # compares an array by reference and is therefore ALWAYS false, so the
        # ['All'] fallback never fires — kept as-is, flagged for a code fix.
        request.writeln('''
<script language='JavaScript' type='text/javascript'>
function getFilter(){
    var user_filter = [];
    for (i=0; i<document.getElementById('user-filter').options.length; i++){
        if (document.getElementById('user-filter').options[i].selected == true){
            user_filter = user_filter.concat(document.getElementById('user-filter').options[i].text);
        }
    }
    if (user_filter == []){
        user_filter = ['All'];
    }
    var poet_filter = [];
    for (i=0; i<document.getElementById('poet-filter').options.length; i++){
        if (document.getElementById('poet-filter').options[i].selected == true){
            poet_filter = poet_filter.concat(document.getElementById('poet-filter').options[i].text);
        }
    }
    if (poet_filter == []){
        poet_filter = ['All'];
    }
    var set_filter = [];
    for (i=0; i<document.getElementById('set-filter').options.length; i++){
        if (document.getElementById('set-filter').options[i].selected == true){
            set_filter = set_filter.concat(document.getElementById('set-filter').options[i].text);
        }
    }
    if (set_filter == []){
        set_filter = ['All'];
    }
    return [user_filter, poet_filter, set_filter];
}
function getFindings(){
    filter = getFilter();
    sendEvent('get_findings', filter);
}
function viewFindings(checkedCategories){
    drawChart();
    //document.getElementById('content').innerHTML = checkedCategories;
}
function openHelp() {
    window.open("''' + WEB_PROGRAM_URL + '''/Help/", "helpwindow", "dependent,height=400,width=300,scrollbars,resizable");
    return false;
}
function exportCSV() {
    filter = [["All"], ["All"], ["All"]];
    sendEvent('exportCSV', filter);
}
function exportFilteredCSV() {
    filter = getFilter();
    sendEvent('exportCSV', filter);
}
</script>
''')
        # HTML for page
        # '''determines whether a given user is the PM of a given meeting'''
        # Walk up from the activity root to the meeting, then scan its
        # "groups" container for a "PM" group holding the current user's id.
        activity = Directory.get_meeting(request.getvalue('global_rootid', ''))
        activities = activity.get_parent()
        meeting = activities.get_parent()
        user_is_pm = False
        for child in meeting:
            if child.name == "groups":
                for group in child:
                    if group.name == "PM":
                        for pm_item in group:
                            if pm_item.user_id == request.session.user.id:
                                user_is_pm = True
        # The navigation menu is only rendered for superusers and the PM.
        if request.session.user.superuser == '1' or user_is_pm:
            request.writeln('<table cellspacing="0" style="border-bottom:#99ccff 1px dotted;padding:3px;" width=100%><tr>')
            # NOTE(review): the <div> below is closed with </a> — mismatched
            # tag in the original markup, kept verbatim.
            request.writeln('''<td id="menu-logo">
<div id="poet-logo">POET</a>
</td>''')
            request.writeln('<td id="user-menu">')
            request.writeln('logged in as <strong>'+request.session.user.name+'</strong>')
            #navigation
            if request.session.user.superuser == '1':
                request.writeln('<span class="divider">|</span> <a href="' + request.cgi_href(_adminaction=None, global_adminview=None) + '">Home</a>')
                request.writeln(' <span class="divider">|</span> <a target="_top" href="' + request.cgi_href(itemid=meeting.id, global_view='Administrator', global_adminview='POET') + '">Edit</a>')
            request.writeln('''<span class="divider">|</span> <a onclick='javascript:openHelp();'>Help</a> <span class="divider">|</span> ''')
            request.writeln('<a href="' + request.cgi_href(global_view='login', _adminaction='logout') + '">Logout</a>')
            request.writeln('</td>')
            request.writeln('</tr></table>')
        # Locate the Question Editor activity so its question "sets" can
        # populate the set filter.
        # NOTE(review): `sets` stays unbound (NameError below) if no activity
        # is named "Question Editor" — presumably one always exists; confirm.
        for activity in activities:
            if activity.name == "Question Editor":
                sets = activity.search1(name="sets")
                break
        request.writeln('''
<br/>
<div id="container">
<div id="reportFindings" class="module">
<h1 style='float:left;'>Findings</h1>
</br></br></br><input type="button" value="Export CSV" onclick="javascript:exportCSV();">
</br><input type="button" value="Export Filtered CSV" onclick="javascript:exportFilteredCSV();">
<select size="5" style='float:right;' onchange="getFindings()" id="set-filter" multiple>
<option value="All" selected>All</option>''')
        for s in sets:
            request.writeln('''<option value="'''+s.name+'''">'''+s.name+'''</option>''')
        request.writeln('''
</select>
<select size="5" style='float:right;' onchange="getFindings()" id="poet-filter" multiple>
<option value="All" selected>All</option>
<option value="Political">Political</option>
<option value="Operational">Operational</option>
<option value="Economic">Economic</option>
<option value="Technical">Technical</option>
</select>
<select size="5" style='float:right;' onchange="getFindings()" id="user-filter" multiple>
<option value="All" selected>All</option>
<option value="PM">PM</option>
<option value="PMO">PMO</option>
<option value="Contractor">Contractor</option>
<option value="Senior Stakeholder">Senior Stakeholder</option>
<option value="User">User</option>
</select>
<br/><br/>
<div id="content" style="clear:both;">
<div id="chartDiv"></div>
</div><!-- /#content -->
</div><!-- /#reportFindings -->
</div><!-- /#container -->
''')
        request.writeln("<script language='JavaScript' type='text/javascript'>startEventLoop();</script>")
        request.writeln("</body></html>")

    ################################################
    ###   Action methods (called from Javascript)

    def export(self, doc, root, q):
        # helper function to build correct xml of question: appends a
        # <question> element (text/description/format/users/comment/comOpt,
        # plus nested options, poet factors, sets and answers) under `root`
        # and returns the document.
        ques = root.appendChild(doc.createElement('question'))
        ques.setAttribute('id', q.id)
        text = ques.appendChild(doc.createElement('text'))
        text.appendChild(doc.createTextNode(q.text))
        descrip = ques.appendChild(doc.createElement('description'))
        descrip.appendChild(doc.createTextNode(q.descrip))
        ansFormat = ques.appendChild(doc.createElement('format'))
        ansFormat.appendChild(doc.createTextNode(q.format))
        users = ques.appendChild(doc.createElement('users'))
        allUsers = ''
        # Backtick repr (Python 2 only) concatenates each user's repr.
        for u in q.users:
            allUsers += `u`
        users.appendChild(doc.createTextNode(allUsers))
        comment = ques.appendChild(doc.createElement('comment'))
        comment.appendChild(doc.createTextNode(q.comment))
        comOpt = ques.appendChild(doc.createElement('comOpt'))
        comOpt.appendChild(doc.createTextNode(q.comOpt))
        options = q.search1(name="options")
        allOptions = options.get_child_items(self)
        opts = ques.appendChild(doc.createElement('options'))
        opts.setAttribute('id', options.id)
        num = opts.appendChild(doc.createElement('num_selections'))
        num.appendChild(doc.createTextNode(str(options.num_selections)))
        for o in allOptions:
            opt = opts.appendChild(doc.createElement('option'))
            opt.setAttribute('id', o.id)
            text = opt.appendChild(doc.createElement('text'))
            text.appendChild(doc.createTextNode(o.text))
        # Disabled legacy block: categories were replaced by poet/sets below.
        """
        gories = q.search1(name="categories")
        allCs = gories.get_child_items(self)
        cats = ques.appendChild(doc.createElement('categories'))
        cats.setAttribute('id', gories.id)
        for cat in allCs:
            ca = cats.appendChild(doc.createElement('category'))
            ca.setAttribute('id', cat.id)
            ca.appendChild(doc.createTextNode(cat.name))
        """
        poet = q.search1(name="poet")
        allPoet = poet.get_child_items(self)
        poetCts = ques.appendChild(doc.createElement('poet'))
        poetCts.setAttribute('id', poet.id)
        for p in allPoet:
            t = poetCts.appendChild(doc.createElement('factor'))
            t.setAttribute('id', p.id)
            t.appendChild(doc.createTextNode(p.name))
        sets = q.search1(name="sets")
        allSets = sets.get_child_items(self)
        tag = ques.appendChild(doc.createElement('sets'))
        tag.setAttribute('id', sets.id)
        for s in allSets:
            ca = tag.appendChild(doc.createElement('set'))
            ca.setAttribute('id', s.id)
            ca.appendChild(doc.createTextNode(s.name))
        answers = q.search1(name="answers")
        allAnswers = answers.get_child_items(self)
        answs = ques.appendChild(doc.createElement('answers'))
        answs.setAttribute('id', answers.id)
        for a in allAnswers:
            answ = answs.appendChild(doc.createElement('answer'))
            answ.setAttribute('id', a.id)
            ans = answ.appendChild(doc.createElement('answer'))
            ans.appendChild(doc.createTextNode(a.answer))
            who = answ.appendChild(doc.createElement('who'))
            who.appendChild(doc.createTextNode(a.who))
            when = answ.appendChild(doc.createElement('when'))
            when.appendChild(doc.createTextNode(a.when))
            comment = answ.appendChild(doc.createElement('comment'))
            comment.appendChild(doc.createTextNode(a.comment))
        return doc

    def get_findings_action(self, request, filterChoice):
        # Answers the client's 'get_findings' event.  filterChoice is
        # [user_filter, poet_filter, set_filter] (lists of names, 'All' =
        # no filtering on that axis).  Builds an XML document of matching
        # questions, dumps it to qaDoc.xml, and returns a 'viewFindings'
        # event carrying the XML text.
        meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))
        parent = meeting.get_parent()
        activities = parent.search1(view='questioneditor')
        questions = activities.search1(name="questions")
        doc = xml.dom.minidom.Document()
        root = doc.appendChild(doc.createElement("QuestionSystem"))
        meta = root.appendChild(doc.createElement('meta'))
        date = meta.appendChild(doc.createElement('exportdate'))
        date.appendChild(doc.createTextNode(time.strftime('%a, %d %b %Y %H:%M:%S')))
        quesRoot = root.appendChild(doc.createElement('questions'))
        xmlDoc = doc
        #log.info("*** START OF REPORT ***")
        #log.info("filterChoice = "+str(filterChoice))
        # Iterate through all questions, filter out the questions that match the categories
        count = 0  # only for debugging (but could be useful later)
        for q in questions:
            #log.info(" --- QUESTION --- ")
            users = q.users
            poet = []
            sets = []
            for qchild in q:
                if qchild.name == "poet":
                    for p in qchild:
                        poet.append(p.name)
                elif qchild.name == "sets":
                    for s in qchild:
                        sets.append(s.name)
            #these three checks could be rewritten as three calls to a function
            #that takes two lists and returns True if there is any shared element
            # check users: match if any filter entry appears in the question's users
            if 'All' in filterChoice[0]:
                includeUsers = True
            else:
                includeUsers = False
                for filterUser in filterChoice[0]:
                    if filterUser in users:
                        includeUsers = True
                        break
            # check poet
            if 'All' in filterChoice[1]:
                includePoet = True
            else:
                includePoet = False
                for filterPoet in filterChoice[1]:
                    if filterPoet in poet:
                        includePoet = True
                        break
            # check categories
            if 'All' in filterChoice[2]:
                includeSet = True
            else:
                includeSet = False
                for filterSet in filterChoice[2]:
                    if filterSet in sets:
                        includeSet = True
                        break
            #If you want to force a question to match every element of a filter, use this logic instead:
            """
            includeUsers = True                       #bool starts as true instead of false
            for filterUser in filterChoice[0]:
                if filterUser not in users:           #check for "not in" as opposed to "in"
                    includeUsers = False
                    break
            """
            #log.info(str(includeUsers)+str(includePoet)+str(includeSet))
            if includeUsers and includePoet and includeSet:
                xmldoc = ReportFindings.export(self, doc, quesRoot, q)
                count += 1
        #log.info("# of matches: "+str(count))
        #log.info("**** END OF REPORT ****")
        # Side effect: a snapshot of the filtered XML is written to disk.
        f = open('qaDoc.xml','w')
        print >> f, xmlDoc.toxml()
        requestedQuestion = []
        events = []
        events.append(Event('viewFindings', xmlDoc.toxml()))
        return events

    def exportCSV_action(self, request, filters):
        # Answers the client's 'exportCSV' event: writes POET.csv with one
        # row per (user, poet factor, set, group) combination and one column
        # per matching question's numeric answer.
        meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))
        parent = meeting.get_parent()
        meetingRoot = parent.get_parent()
        questioneditor = parent.search1(view='questioneditor')
        questions = questioneditor.search1(name="questions")
        groups = meetingRoot.search1(name='groups')
        # group name -> list of member user ids
        userDictionary = {}
        for group in groups:
            userDictionary[group.name] = []
            for user in group:
                userDictionary[group.name].append(user.user_id)
        group_filter = filters[0]
        poet_filter = filters[1]
        sets_filter = filters[2]
        # Step 1: Create a dictionary with a key for every existing combination
        # of [POET]x[Set]x[Group].  Each key's entry will be a list of question
        # ids that belong to that combination.  This dictionary acts as a
        # "master list" for ordering purposes.
        qLists = {}
        for q in questions:
            q_poet = []  # the question's poet factors
            q_poetNode = q.search1(name='poet')
            for q_p in q_poetNode:
                q_poet.append(q_p.name)
            if not q_poet:  # if q_poet == []
                # Only necessary for POET factors, because a question with no
                # sets/groups can't be asked.
                q_poet = ["None"]
            if not "All" in poet_filter:
                # Change this to "elif", and questions without a POET factor
                # will survive the filter anyway.
                q_poet = filter(lambda x:x in q_poet, poet_filter)
            q_sets = []  # the question's sets
            q_setsNode = q.search1(name='sets')
            for q_set in q_setsNode:
                q_sets.append(q_set.name)
            if not "All" in sets_filter:  # "all" is not in the filter set
                q_sets = filter(lambda x:x in q_sets, sets_filter)
            q_groups = q.users  # the question's groups
            if not "All" in group_filter:  # "all" is not in the filter set
                q_groups = filter(lambda x:x in q_groups, group_filter)
            for qp in q_poet:            # for...
                for qs in q_sets:        # every...
                    for qg in q_groups:  # combination:
                        try:
                            qLists[qp+qs+qg].append(q.id)  # add it to the relevant list
                        except KeyError:                   # entry doesn't exist yet
                            qLists[qp+qs+qg] = [q.id]
        # Step 2: Create a dictionary with a key for every combination of
        # [User] x [POET] x [Set] x [Group].  Populate it with each entry a
        # list of ints, with ints corresponding to answers to questions.
        # This is almost exactly what the final CSV will look like.
        answerData = {}
        # translates answers into numbers
        t = {'stronglydisagree': 1, 'disagree': 2, 'somewhatdisagree': 3, 'neither': 4, 'somewhatagree': 5, 'agree': 6, 'stronglyagree': 7}
        for q in questions:
            q_poet = []  # the question's poet factors
            q_poetNode = q.search1(name='poet')
            for q_p in q_poetNode:
                q_poet.append(q_p.name)
            if not q_poet:  # if q_poet == []
                q_poet = ["None"]
            if not "All" in poet_filter:
                q_poet = filter(lambda x:x in q_poet, poet_filter)
            q_sets = []  # the question's sets
            q_setsNode = q.search1(name='sets')
            for q_set in q_setsNode:
                q_sets.append(q_set.name)
            if not "All" in sets_filter:
                q_sets = filter(lambda x:x in q_sets, sets_filter)
            q_groups = q.users  # the question's groups
            if not "All" in group_filter:
                q_groups = filter(lambda x:x in q_groups, group_filter)
            answers = q.search1(name='answers')  # all the answers that question has received
            for answer in answers:       # for every individual answer...
                user = answer.who        # who answered it...
                user_id = answer.creatorid
                for qp in q_poet:            # and what...
                    for qs in q_sets:        # categories it...
                        for qg in q_groups:  # belongs to:
                            if user_id in userDictionary[qg]:  # ignore the groups the user doesn't belong to
                                index = qLists[qp+qs+qg].index(q.id)  # fetch the index from the master list
                                entry = user+"|"+qp+"|"+qs+"|"+qg     # compose a name with "|" marks for division later
                                try:
                                    answerData[entry][index] = t[answer.answer]  # update the appropriate column of the row
                                except KeyError:  # that row doesn't exist yet -- so make it
                                    answerData[entry] = [0] * len(qLists[qp+qs+qg])  # a zero for every question belonging to the poet/set/group
                                    answerData[entry][index] = t[answer.answer]
        # Step 3: Create the CSV file.  Each key of the dictionary created in
        # Step 2 is transformed into a row of the CSV.
        csv = "Username, POET Factor, Set, Group\n"  # the header
        for key in answerData.keys():  # each of these will be a row in the final file
            # "Alan|Political|Mandatory|PM" -> ["Alan", "Political", "Mandatory", "PM"]
            keySplit = key.split('|')
            string = "{user}, {poet}, {set}, {group}".format( user=keySplit[0], poet=keySplit[1], set=keySplit[2], group=keySplit[3])  # the key becomes the first four entries of the row
            for answer in answerData[key]:
                if answer > 0:
                    string += ", "+str(answer)  # if the user answered, add that answer to the end of the row
                else:
                    string += ", "              # if the user didn't answer, leave that slot blank
            string += "\n"  # move to next row
            csv += string   # add to CSV
        log.info("csv:\n"+csv)  # debug
        f = open('POET.csv','w')
        print >> f, csv
        events = []
        return events

    #######################################
    ###   Window initialization methods

    def get_initial_events(self, request, rootid):
        '''Retrieves a list of initial javascript calls that should be sent to
           the client when the view first loads. Typically, this is a series of
           add_processor events.'''
        meeting = Directory.get_meeting(request.getvalue('global_rootid', ''))
        parent = meeting.get_parent()
        activities = parent.search1(view='questioneditor')
        events = []
        # NOTE(review): allQuestions is built but never used and the events
        # list is returned empty — presumably leftover scaffolding; confirm.
        allQuestions = []
        for child in activities.search1(name="questions"):
            item = datagate.get_item(child.id)
            options = item.search1(name="options")
            allChoices = options.get_child_items(self)
            allOptions = []
            for choice in allChoices:
                allOptions.append(choice.text)
            allQuestions.append([child.id, child.text, child.format, child.comment, allOptions, options.num_selections, child.comOpt])
        return events

    def initialize_activity(self, request, new_activity):
        '''Called from the Administrator. Sets up the activity'''
        BaseView.initialize_activity(self, request, new_activity)
UTF-8
Python
false
false
2,013
12,919,261,663,619
ee4df12b2afc98fe20f2f095eeac3f3af21d08b3
15fd40caf9143d9f313846e25f86d37126bdc759
/sut_tools/installTests.py
fb34d97cd17dfdff855031f850b11627ba528c60
[]
no_license
lsblakk/tools
https://github.com/lsblakk/tools
e07ae50d7bb14a95241c64e83133d14493c7b3d2
a427d8b2790350b524b66ac14534fd836ae10476
HEAD
2016-09-06T10:13:15.460163
2012-03-28T23:13:23
2012-03-28T23:13:23
2,156,912
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
"""Push a local file to a SUT-agent device and unpack it in the device root.

Usage: install.py <ip address> <localfilename>
"""
import os, sys
import devicemanagerSUT as devicemanager

# Exactly two arguments are required: device IP and local file path.
# Fix: the original used the `<>` operator, which is deprecated in Python 2
# and a syntax error in Python 3; `!=` is equivalent in both.
if len(sys.argv) != 3:
    # Parenthesized single-argument print works identically in Python 2 and 3.
    print("usage: install.py <ip address> <localfilename>")
    sys.exit(1)

print("connecting to: " + sys.argv[1])
dm = devicemanager.DeviceManagerSUT(sys.argv[1])

# Copy the file into the device's test root, then unpack it in place.
devRoot = dm.getDeviceRoot()
source = sys.argv[2]
filename = os.path.basename(source)
target = os.path.join(devRoot, filename)
dm.pushFile(source, target)
dm.unpackFile(target)
UTF-8
Python
false
false
2,012
18,889,266,172,522
269a1e0caf5e0ac25ae4bf643ca1022ce80beebc
ac781bfeb6e4cb47e9cbd4d25f848734f3d30097
/callison/day2.python.com/controllers/login.py
6f222ff7f774eb28453b7c525751cfd9912853af
[]
no_license
tbsmithFS/SSL1308
https://github.com/tbsmithFS/SSL1308
3e435242797f6c691e7116e0468c3f208875af98
d2a5b79c78bf2048670352dbfc6611bf091d237e
refs/heads/master
2021-01-01T16:40:24.282430
2013-08-29T02:17:26
2013-08-29T02:17:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python from models.view import View class Login(): def get(self, pairs, data={}): if 'action' not in pairs: action = 'home' else: action = pairs.getvalue('action') view_model = View() view_model.print_header() data = {'site_title' : "My Python website title - login", 'logo_title' : "My Python logo", 'main_body_text' : "Welcome to the login page", 'copyright_info' : "Callisonification &copy; 2013",} view_model.get_view("header", data); view_model.get_view("nav", data); view_model.get_view("body", data); view_model.get_view("footer", data);
UTF-8
Python
false
false
2,013
3,032,246,924,034
b7bede457076f3466eafdc414f01bd3c4607dc0d
a7d24f1685d5e5fffc57bd08fd3194ca94277b12
/.config/sublime-text-2/Packages/Package Control/package_control/providers/package_provider.py
7928e6504b7d82f8a03c19d2ad5e72b8c063eafb
[ "LGPL-2.0-or-later", "LGPL-2.1-or-later", "MIT" ]
non_permissive
tekezo/dot-files
https://github.com/tekezo/dot-files
6cbe6a8e4c04ef5a756da86d36da6d694da1ced6
de05d734b9669004785ef63ce68ecb76fc4b102b
refs/heads/master
2021-01-15T21:49:37.303834
2013-04-07T11:19:03
2013-04-07T11:19:03
12,126,779
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
import re

from ..console_write import console_write
from .platform_comparator import PlatformComparator


class PackageProvider(PlatformComparator):
    """
    Generic repository downloader that fetches package info

    With the current channel/repository architecture where the channel file
    caches info from all includes repositories, these package providers just
    serve the purpose of downloading packages not in the default channel.

    The structure of the JSON a repository should contain is located in
    example-packages.json.

    :param repo:
        The URL of the package repository

    :param package_manager:
        An instance of :class:`PackageManager` used to download the file
    """

    def __init__(self, repo, package_manager):
        # None = not fetched yet, False = fetch/parse failed, dict = loaded.
        self.repo_info = None
        self.repo = repo
        self.package_manager = package_manager
        self.unavailable_packages = []

    def match_url(self):
        """Indicates if this provider can handle the provided repo"""
        return True

    def fetch_repo(self):
        """Retrieves and loads the JSON for other methods to use"""
        # Fix: compare against the None singleton with `is not` (PEP 8)
        # instead of `!= None`.
        if self.repo_info is not None:
            return

        repository_json = self.package_manager.download_url(self.repo,
            'Error downloading repository.')
        if repository_json == False:
            self.repo_info = False
            return

        try:
            self.repo_info = json.loads(repository_json)
        except ValueError:
            console_write(u'Error parsing JSON from repository %s.' % self.repo, True)
            self.repo_info = False

    def get_packages(self):
        """
        Provides access to the repository info that is cached in a channel

        :return:
            A dict in the format:
            {
                'Package Name': {
                    # Package details - see example-packages.json for format
                },
                ...
            }
            or False if there is an error
        """
        self.fetch_repo()
        if self.repo_info == False:
            return False

        output = {}
        for package in self.repo_info['packages']:
            platforms = package['platforms'].keys()
            best_platform = self.get_best_platform(platforms)
            if not best_platform:
                self.unavailable_packages.append(package['name'])
                continue

            # Rewrites the legacy "zipball" URLs to the new "zip" format
            downloads = package['platforms'][best_platform]
            rewritten_downloads = []
            for download in downloads:
                download['url'] = re.sub(
                    '^(https://nodeload.github.com/[^/]+/[^/]+/)zipball(/.*)$',
                    '\\1zip\\2', download['url'])
                rewritten_downloads.append(download)

            info = {
                'name': package['name'],
                'description': package.get('description'),
                'url': package.get('homepage', self.repo),
                'author': package.get('author'),
                'last_modified': package.get('last_modified'),
                'downloads': rewritten_downloads
            }

            output[package['name']] = info

        return output

    def get_renamed_packages(self):
        """:return: A dict of the packages that have been renamed"""
        # Robustness fix: the original dereferenced self.repo_info without
        # fetching first, raising AttributeError when called before
        # fetch_repo() (repo_info is None) or after a failed fetch
        # (repo_info is False).  Fetch lazily and fall back to {}.
        self.fetch_repo()
        if not self.repo_info:
            return {}
        return self.repo_info.get('renamed_packages', {})

    def get_unavailable_packages(self):
        """
        Provides a list of packages that are unavailable for the current
        platform/architecture that Sublime Text is running on.

        This list will be empty unless get_packages() is called first.

        :return: A list of package names
        """
        return self.unavailable_packages
UTF-8
Python
false
false
2,013
2,817,498,570,177
0c281a1f5485f670b618c144b999b2a601928377
8803d87c5a62e9a6d0b8895375c3443559894591
/mlsvm.py
fac0dc17dcff26cb9c86c8750567bcb7d6be4113
[]
no_license
Tomlong/MLlib-UI
https://github.com/Tomlong/MLlib-UI
f615f8926165e0ce57f07191a692403d0fcc15e5
338e6c50ce736dee963cb300011d6618d7d5801d
refs/heads/master
2021-01-02T09:33:24.119051
2014-08-29T02:40:53
2014-08-29T02:40:53
23,449,998
4
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import numpy as np
import matplotlib.pyplot as plt
from normalize import norm
from sklearn.decomposition import PCA
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint as lbp
from pyspark.mllib.classification import SVMWithSGD as svm


def SVMModel(dataPath, label, max_label, min_label, character, master, normalize, ispca):
    # Train a binary SVM (Spark MLlib SVMWithSGD) on a `character`-delimited
    # text file of numeric rows, report training accuracy, and optionally
    # plot a 2-D view (after PCA) to result.jpg.
    #
    # dataPath:  path/URL of the input text file (one sample per line).
    # label:     0 if the label is the FIRST column (the row is reversed so
    #            the label ends up last); otherwise the label is the last column.
    # max_label/min_label: label range; values above the midpoint become class
    #            1.0, others 0.0.
    # character: field delimiter.
    # master:    Spark master URL.
    # normalize: 1 to normalize features via normalize.norm() first.
    # ispca:     1 to project features to 2 components with PCA before training.
    # Returns (showpic, String): showpic is 1 when result.jpg was written,
    # String is a human-readable result summary (weights + accuracy %).
    pca_n = 2
    sc = SparkContext(master)
    data = sc.textFile(dataPath)
    # Midpoint of the label range: the binary class threshold.
    mid_label = (float(max_label) + float(min_label)) / 2.0
    print data.map(lambda line: line.split(character)).collect()
    ndata = data.map(lambda line: line.split(character)).map(lambda part: (map(lambda x: float(x) ,part[0: len(part)])))
    if label == 0:
        # Reverse each row so the label column is last in both cases.
        ndata = ndata.map(lambda line: line[::-1])
    if normalize == 1:
        # norm() is expected to return (label, feature-vector) pairs.
        test_data = norm(ndata.collect())
        norm_data = sc.parallelize(test_data)
        train_data = norm_data.map(lambda part: lbp([1.0 if float(part[0]) > mid_label else 0.0][0], part[1]))
        test_data = norm_data.map(lambda part: ([1.0 if float(part[0]) > mid_label else 0.0][0], part[1])).collect()
    else:
        # NOTE(review): `float(len(part) - 1) > mid_label` thresholds the
        # COLUMN COUNT, not the label value; the test_data line below uses
        # part[len(part) - 1] — presumably train_data should too. Confirm.
        train_data = ndata.map(lambda part: lbp([1.0 if float(len(part) - 1) > mid_label else 0.0][0], part[0: len(part) - 1]))
        test_data = ndata.map(lambda part: ([1.0 if float(part[len(part) - 1]) > mid_label else 0.0][0], part[0:len(part) - 1])).collect()
    if ispca == 1:
        # Project features to 2 dimensions so the result can be plotted.
        pca = PCA(n_components = pca_n)
        pca_train = [test_data[i][1] for i in range(len(test_data))]
        pca_data = pca.fit(pca_train).transform(pca_train)
        test = []
        for i in range(len(pca_data)):
            test.append([test_data[i][0], pca_data[i]])
        train_data = sc.parallelize(test).map(lambda part: lbp(part[0], part[1]))
        test_data = test
    model_svm = svm.train(train_data)
    # Training accuracy: fraction of (training) samples predicted correctly.
    acc_svm = 0
    err_lrg = 0.0
    size = len(train_data.collect())
    for i in range(size):
        if model_svm.predict(test_data[i][1]) == test_data[i][0]:
            acc_svm += 1
    String = "SVM Result:\n"
    String = String + str(model_svm.weights) + "\n"
    String = String + str((float(acc_svm)/ float(size)) * 100) + "%"
    x = []
    y = []
    showpic = 0
    if len(test_data[0][1]) == 2:
        # Two features: plottable even without an explicit PCA request.
        ispca = 1
    if ispca == 1:
        # Scatter the two classes (red = 0.0, blue = 1.0) and draw the
        # separating line from the model weights, then save to result.jpg.
        for i in range(size):
            if test_data[i][0] == 0.0:
                plt.plot(test_data[i][1][0], test_data[i][1][1], 'ro', color = 'r', markersize = 8)
            elif test_data[i][0] == 1.0:
                plt.plot(test_data[i][1][0], test_data[i][1][1], 'ro', color = 'b', markersize = 8)
        test = sc.parallelize(test_data)
        max_axis = test.map(lambda part: part[1][0]).max()
        min_axis = test.map(lambda part: part[1][0]).min()
        plt.plot([min_axis, max_axis], [max_axis * model_svm.weights[0] + model_svm.weights[1], min_axis * model_svm.weights[0] + model_svm.weights[1]], 'g-', linewidth= 2)
        plt.savefig('result.jpg')
        plt.close('all')
        showpic = 1
    sc.stop()
    return (showpic, String)
UTF-8
Python
false
false
2,014
747,324,355,846
ad85606eb94dd4a76c57796e2e87f7e772f1a892
2248c356664df8ddede6cddcc6c7e62772551117
/python/lang_sample.py
4c67a233e6d6ec8145cafe3919e5c166891a532e
[]
no_license
abishop42/sample
https://github.com/abishop42/sample
d414fda94b4a07ab972ecf8194e4a5eddeee5cf3
4d7e9f0ee402f77acfd76458d07524338da33bc1
refs/heads/master
2021-01-10T14:46:05.497080
2014-10-27T18:35:53
2014-10-27T18:35:53
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Vocabulary for the mini query language: equality words, all comparison
# words/symbols, and boolean connectives.
equals_opertators = ["is", "==","equal","="]
opertators = ["is","not","greater","than","less","equal","to", "<", "<=", "==", ">", ">=", "!=", "like", "contains","equal","="]
combines = ["and", "or"]


class QuerryString():
    """Parse a small English-like query (e.g. "name == fred and postcode
    greater than 9000") and evaluate it against a list of dictionaries.

    The operator/connective vocabularies are injected via the constructor
    (normally the module-level `opertators` / `combines` lists).
    """

    def __init__(self, ops=None, com=None):
        # Use None sentinels so instances never share one mutable default list.
        self.opertators = ops if ops is not None else []
        self.combines = com if com is not None else []
        self.query = ""

    def parse(self, input_string, delim=' '):
        """Split `input_string` into clause lists and connective strings.

        Returns (and stores in self.query) a structure such as
        [["name", "==", "fred"], "and", ["postcode", "==", "9999"]].

        Bug fix: the original tokenized the *global* variable `query`
        instead of the `input_string` argument (its `initial`/`split_pos`
        locals built from input_string were dead code), so it only worked
        inside this module's __main__ demo.
        """
        connectives = ["and", "or"]
        part = input_string.split(delim)
        # Indices of every connective token, plus -1 as an end sentinel.
        breaks = [i for i in range(len(part)) if part[i].lower() in connectives]
        breaks.append(-1)
        temp = []
        pos = 0
        for i in breaks:
            if part[pos] in connectives:
                temp.append(part[pos])
                pos = pos + 1
            if i == -1:
                temp.append(part[pos:])   # final clause runs to the end
            else:
                temp.append(part[pos:i])  # clause up to the next connective
            pos = i
        # Merge multi-word operators ("greater than" ...) inside each clause.
        result = [self.combine_operators(t) for t in temp]
        self.query = result
        return result

    def combine_operators(self, input_object):
        """Collapse consecutive operator words in a token list into one string,
        e.g. ["a", "greater", "than", "b"] -> ["a", "greater than", "b"].
        Non-list input (a connective string) is returned unchanged.
        """
        result = input_object
        if type([]) == type(input_object):
            result = []
            opp = []
            for token in input_object:
                if token in opertators:
                    opp.append(token)
                else:
                    if len(opp) > 0:
                        result.append(" ".join(opp))
                        opp = []
                    result.append(token)
        return result

    def check_object(self, obj):
        """Return the dicts in `obj` that satisfy the previously parsed query.

        Each clause is evaluated to "True"/"False", connectives are kept
        verbatim, and the joined boolean expression is eval()'d.  The eval
        input can only ever contain "True"/"False"/"and"/"or", but treat
        untrusted query vocabularies with care regardless.
        """
        result = []
        for o in obj:
            status = []
            if type(o) == type({}):
                for q in self.query:
                    if type(q) == type([]):
                        if q[1] in ["is", "equal", "==", "="]:
                            status.append("%s" % (o[q[0]] == q[2]))
                        elif q[1] in ["is not", "!="]:
                            status.append("%s" % (not (o[q[0]] == q[2])))
                        elif q[1] in ["greater than", ">"]:
                            status.append("%s" % (float(o[q[0]]) > float(q[2])))
                        elif q[1] in ["greater than equal to", ">="]:
                            status.append("%s" % (float(o[q[0]]) >= float(q[2])))
                        elif q[1] in ["less than", "<"]:
                            # Bug fix: the original used ">" here, so
                            # "less than" behaved exactly like "greater than".
                            status.append("%s" % (float(o[q[0]]) < float(q[2])))
                        elif q[1] in ["less than equal to", "<="]:
                            status.append("%s" % (float(o[q[0]]) <= float(q[2])))
                    elif q in ["and", "or"]:
                        status.append(q)
                print("overall => " + " ".join(status))
                overall = eval(" ".join(status))
                print(overall)
                if overall == True:
                    result.append(o)
        return result


if __name__ == "__main__":
    input_object = [
        {"input_key": "1111", "name": "fred", "address": "some street", "suburb": "aplace", "postcode": "9999"},
        {"input_key": "1112", "name": "fred", "address": "some street", "suburb": "aplace", "postcode": "9991"},
        {"input_key": "1112", "name": "blah", "address": "some street", "suburb": "donuts", "postcode": "9991"},
        {"input_key": "2222", "name": "fred", "address": "some street", "suburb": "donuts", "postcode": "9992"},
    ]
    query = "postcode == 9992 and name == blah or suburb = donuts"
    for i in input_object:
        print(i)
    q = QuerryString(opertators, combines)
    print(q.parse(query))
    print("*** RESULTS ***")
    for r in q.check_object(input_object):
        print(r)
UTF-8
Python
false
false
2,014
1,460,288,903,810
d3d95ab1e7e1cf6069382faff2db6ebb644744fd
45af6ec2ef1444817e64a1c27c781e23a52f744d
/upload_all_the_things/challenge1.py
1d1d0c3b12f788dd6cee968746925d47c3e9f3d6
[]
no_license
Trietptm-on-Coding-Algorithms/2014_Fall_CTF_Week_1
https://github.com/Trietptm-on-Coding-Algorithms/2014_Fall_CTF_Week_1
130e791bfc3f6fd331a0a6d88c74ec8c8bdb835c
193272ec6b42fbb48e07e3a9423c6f26bbaa1760
refs/heads/master
2020-05-14T12:10:07.620717
2014-09-04T00:53:44
2014-09-04T00:53:44
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from flask import Flask, render_template, redirect, request, send_from_directory
from flask_wtf import Form
from wtforms import StringField
from wtforms.validators import DataRequired
from werkzeug import secure_filename
from flask_wtf.file import FileField
import glob

app = Flask(__name__)
# NOTE(review): debug mode and a hard-coded secret key must not ship to
# production; load the key from configuration/environment instead.
app.debug = True
app.secret_key = "ALNFNAI*)*@$)(NMD)(N@D)(J)(@E"


class MyForm(Form):
    name = StringField('name', validators=[DataRequired()])


class PhotoForm(Form):
    # Single file field for the photo upload form.
    photo = FileField('Your photo')


@app.route('/upload/', methods=('GET', 'POST'))
def upload():
    """Accept a photo upload and list previously uploaded .png files."""
    form = PhotoForm()
    if form.validate_on_submit():
        # Security fix: sanitize the client-supplied filename before using
        # it in a filesystem path.  The original saved the raw filename,
        # allowing path traversal (e.g. "../../evil.py") to write outside
        # uploads/.  secure_filename was already imported but unused.
        filename = secure_filename(form.photo.data.filename)
        form.photo.data.save('uploads/' + filename)
    else:
        filename = None
    list_of_files = glob.glob('uploads/*.png')
    return render_template('upload.html', form=form,
                           filename=filename, files=list_of_files)


@app.route('/uploads/<filename>')
def send_file(filename):
    # send_from_directory guards against traversal on the download path.
    return send_from_directory('uploads/', filename)


@app.route('/')
def hello_world():
    return 'Hello World'


if __name__ == '__main__':
    app.run(host='0.0.0.0')
UTF-8
Python
false
false
2,014
12,515,534,707,046
ce5276c99b542d7419106349c3ad19e33e342de4
a559ed0cd9aa8f1c650c5f2a073f7effc7061e73
/pymegacli.py
9d7be4f891e058a2496926cbb58c3a9d7c42fa42
[]
no_license
williamjoy/pymegacli
https://github.com/williamjoy/pymegacli
754f8357aedc9659363277dca694858f959952db
b472496ad8b31f02a7cdb6c60ade3154e928420a
refs/heads/master
2019-01-01T04:34:57.316992
2010-07-08T11:39:08
2010-07-08T11:39:08
34,435,299
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# # Copyright (c) 2010, Giovanni P. Tirloni <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Thin Python wrapper around LSI's MegaCli RAID utility.

Each class parses one section of MegaCli's fixed-column text output; the
slice offsets below are tied to the exact output format of the MegaCli
build in use, so treat them as format constants, not tunables.
"""
from subprocess import Popen, PIPE

# Absolute path of the MegaCli binary invoked by megacli().
MEGACLI = '/usr/local/sas/utilities/MegaCli'


class Adapter:
    """RAID adapter properties parsed from `MegaCli -AdpAllInfo`."""
    device_id = 0
    product_name = ''
    serial_number = ''
    fw_package_build = ''
    fw_version = ''
    bios_version = ''
    webbios_version = ''
    preboot_cli_version = ''
    boot_block_version = ''
    sas_address = ''
    bbu_present = False
    alarm_present = False
    nvram_present = False
    serial_debugger_present = False
    memory_present = False
    # NOTE(review): flash_present is declared and printed by show() but no
    # branch in load() ever assigns it -- confirm the 'Flash' line prefix
    # against real MegaCli output and add a parser branch if wanted.
    flash_present = False
    memory_size = ''

    def load(self, adapter_id):
        """Populate this object from `MegaCli -AdpAllInfo -a<adapter_id>`.

        Returns 0 (and prints a message) if MegaCli cannot be executed.
        """
        try:
            ret = megacli('-AdpAllInfo -a%i -NoLog' % adapter_id)
        except OSError:
            print('Failed to get adapter information (MegaCli -AdpAllInfo)')
            return 0
        for line in ret.readlines():
            if line[0:9] == 'Adapter #':
                self.device_id = int(clean_nl(line[9:]))
            if line[0:12] == 'Product Name':
                self.product_name = clean_nl(line[18:])
            elif line[0:9] == 'Serial No':
                self.serial_number = clean_nl(line[18:])
            elif line[0:16] == 'FW Package Build':
                self.fw_package_build = clean_nl(line[18:])
            elif line[0:10] == 'FW Version':
                self.fw_version = clean_nl(line[21:])
            elif line[0:12] == 'BIOS Version':
                self.bios_version = clean_nl(line[21:])
            elif line[0:15] == 'WebBIOS Version':
                self.webbios_version = clean_nl(line[21:])
            elif line[0:19] == 'Preboot CLI Version':
                self.preboot_cli_version = clean_nl(line[21:])
            elif line[0:18] == 'Boot Block Version':
                self.boot_block_version = clean_nl(line[21:])
            elif line[0:11] == 'SAS Address':
                self.sas_address = clean_nl(line[18:])
            elif line[0:3] == 'BBU':
                self.bbu_present = is_present(line[18:])
            elif line[0:5] == 'Alarm':
                self.alarm_present = is_present(line[18:])
            elif line[0:5] == 'NVRAM':
                self.nvram_present = is_present(line[18:])
            elif line[0:15] == 'Serial Debugger':
                self.serial_debugger_present = is_present(line[18:])
            elif line[0:8] == 'Memory  ':
                # 'Memory  ' (two trailing spaces, 8 chars) so that the
                # 'Memory Size' line falls through to the branch below.
                self.memory_present = is_present(line[18:])
            elif line[0:11] == 'Memory Size':
                self.memory_size = clean_nl(line[18:])

    def show(self):
        """Print a human-readable summary of the adapter."""
        ret = """Device ID : %d
Product Name : %s
Serial Number : %s
FW Package Build : %s
FW Version : %s
BIOS Version : %s
WebBIOS Version : %s
Preboot CLI Version : %s
Boot Block Version : %s
SAS Address : %s
BBU Present : %s
Alarm Present : %s
NVRAM Present : %s
Serial Debugger Present : %s
Memory Present : %s
Flash Present : %s
Memory Size : %s""" % (self.device_id, self.product_name,
                       self.serial_number, self.fw_package_build, self.fw_version,
                       self.bios_version, self.webbios_version, self.preboot_cli_version,
                       self.boot_block_version, self.sas_address, self.bbu_present,
                       self.alarm_present, self.nvram_present, self.serial_debugger_present,
                       self.memory_present, self.flash_present, self.memory_size)
        print(ret)


class Enclosure:
    """Drive-enclosure properties parsed from `MegaCli -EncInfo` output."""
    device_id = 0
    number_of_slots = 0
    number_of_power_supplies = 0
    number_of_fans = 0
    number_of_temperature_sensors = 0
    number_of_alarms = 0
    number_of_sim_modules = 0
    number_of_physical_drives = 0
    status = ''
    position = 0
    connector_name = ''
    partner_device_id = 0

    def load_from_text(self, input):
        """Populate fields from one enclosure's block of -EncInfo lines.

        Lines are indented 4 spaces; values start at column 36.
        """
        for line in input:
            if line[4:13] == 'Device ID':
                self.device_id = int(clean_nl(line[36:]))
            if line[4:19] == 'Number of Slots':
                self.number_of_slots = int(clean_nl(line[36:]))
            elif line[4:28] == 'Number of Power Supplies':
                self.number_of_power_supplies = int(clean_nl(line[36:]))
            elif line[4:18] == 'Number of Fans':
                self.number_of_fans = int(clean_nl(line[36:]))
            elif line[4:33] == 'Number of Temperature Sensors':
                self.number_of_temperature_sensors = int(clean_nl(line[36:]))
            elif line[4:20] == 'Number of Alarms':
                self.number_of_alarms = int(clean_nl(line[36:]))
            elif line[4:25] == 'Number of SIM Modules':
                self.number_of_sim_modules = int(clean_nl(line[36:]))
            elif line[4:29] == 'Number of Physical Drives':
                self.number_of_physical_drives = int(clean_nl(line[36:]))
            elif line[4:10] == 'Status':
                self.status = clean_nl(line[36:])
            elif line[4:12] == 'Position':
                self.position = clean_nl(line[36:])
            elif line[4:18] == 'Connector Name':
                self.connector_name = clean_nl(line[36:])
            elif line[4:21] == 'Partner Device Id':
                self.partner_device_id = int(clean_nl(line[36:]))

    def show(self):
        """Print a human-readable summary of the enclosure."""
        ret = """Device ID : %i
Number of Slots : %i
Number of Power Supplies : %i
Number of Fans : %i
Number of Temperature Sensors : %i
Number of Alarms : %i
Number of SIM Modules : %i
Number of Physical Drives : %i
Status : %s
Position : %s
Connector Name : %s
Partner Device Id : %i""" % (self.device_id, self.number_of_slots,
                             self.number_of_power_supplies, self.number_of_fans,
                             self.number_of_temperature_sensors, self.number_of_alarms,
                             self.number_of_sim_modules, self.number_of_physical_drives,
                             self.status, self.position, self.connector_name,
                             self.partner_device_id)
        print(ret)


class PhysicalDevice:
    """One physical drive, parsed from `MegaCli -PdInfo` / `-PdList` output."""
    adapter_id = 0
    enclosure_id = 0
    slot_id = 0
    device_id = 0
    sequence_number = 0
    media_errors = 0
    other_errors = 0
    predictive_failures = 0
    last_predictive_seq_number = 0
    pd_type = ''
    raw_size = ''
    non_coerced_size = ''
    coerced_size = ''
    firmware_state = ''
    sas_address = ''
    connected_port_number = ''
    inquiry_data = ''
    fde_capable = ''
    fde_enable = ''
    secured = ''
    locked = ''
    foreign_state = ''
    device_speed = ''
    link_speed = ''
    media_type = ''

    def led_on(self):
        """Start blinking this drive's locate LED; returns False on failure."""
        try:
            ret = megacli('-PdLocate -Start -PhysDrv[%i:%i] -a%i'
                          % (self.enclosure_id, self.slot_id, self.adapter_id))
        except OSError:
            print('Failed to turn location LED on (MegaCli -PdLocate -Start)')
            return False
        return True

    def led_off(self):
        """Stop blinking this drive's locate LED; returns False on failure."""
        try:
            ret = megacli('-PdLocate -Stop -PhysDrv[%i:%i] -a%i'
                          % (self.enclosure_id, self.slot_id, self.adapter_id))
        except OSError:
            print('Failed to turn location LED on (MegaCli -PdLocate -Stop)')
            return False
        return True

    def load_from_text(self, input):
        """Populate fields from one drive's block of MegaCli output lines."""
        for line in input:
            if line[0:19] == 'Enclosure Device ID':
                self.enclosure_id = int(clean_nl(line[21:]))
            if line[0:11] == 'Slot Number':
                self.slot_id = int(clean_nl(line[13:]))
            elif line[0:9] == 'Device Id':
                self.device_id = int(clean_nl(line[11:]))
            elif line[0:15] == 'Sequence Number':
                self.sequence_number = int(clean_nl(line[17:]))
            elif line[0:17] == 'Media Error Count':
                self.media_errors = int(clean_nl(line[19:]))
            elif line[0:17] == 'Other Error Count':
                self.other_errors = int(clean_nl(line[19:]))
            elif line[0:24] == 'Predictive Failure Count':
                self.predictive_failures = int(clean_nl(line[26:]))
            elif line[0:40] == 'Last Predictive Failure Event Seq Number':
                # BUG FIX: the original assigned to
                # self.last_predictive_failure_seq_number, but the class
                # attribute and show() read self.last_predictive_seq_number,
                # so the parsed value was silently lost.
                self.last_predictive_seq_number = int(clean_nl(line[42:]))
            elif line[0:7] == 'PD Type':
                self.pd_type = clean_nl(line[9:])
            elif line[0:8] == 'Raw Size':
                # Size lines look like "Raw Size: 136.218 GB [0x11...]";
                # keep only the numeric part before the bracketed sectors.
                delim = line.find('[') - 4
                self.raw_size = float(clean_nl(line[10:delim]))
            elif line[0:16] == 'Non Coerced Size':
                delim = line.find('[') - 4
                self.non_coerced_size = float(clean_nl(line[18:delim]))
            elif line[0:12] == 'Coerced Size':
                delim = line.find('[') - 4
                self.coerced_size = float(clean_nl(line[14:delim]))
            elif line[0:14] == 'Firmware state':
                self.firmware_state = clean_nl(line[16:])
            elif line[0:11] == 'SAS Address':
                self.sas_address = clean_nl(line[16:])
            elif line[0:21] == 'Connected Port Number':
                self.connected_port_number = clean_nl(line[23:])
            elif line[0:12] == 'Inquiry Data':
                self.inquiry_data = clean_nl(line[14:])
            elif line[0:11] == 'FDE Capable':
                self.fde_capable = clean_nl(line[13:])
            elif line[0:10] == 'FDE Enable':
                self.fde_enable = clean_nl(line[12:])
            elif line[0:7] == 'Secured':
                self.secured = clean_nl(line[9:])
            elif line[0:6] == 'Locked':
                self.locked = clean_nl(line[8:])
            elif line[0:13] == 'Foreign State':
                self.foreign_state = clean_nl(line[15:])
            elif line[0:12] == 'Device Speed':
                self.device_speed = clean_nl(line[14:])
            elif line[0:10] == 'Link Speed':
                self.link_speed = clean_nl(line[12:])
            elif line[0:10] == 'Media Type':
                self.media_type = clean_nl(line[12:])

    def load(self, adapter_id, enclosure_id, slot_id):
        """Populate this object from `MegaCli -PdInfo` for one drive."""
        try:
            ret = megacli('-PdInfo -PhysDrv[%i:%i] -a%i'
                          % (enclosure_id, slot_id, adapter_id))
        except OSError:
            print('Failed to get physical device information (MegaCli -PdInfo)')
            return []
        self.adapter_id = adapter_id
        ret_lines = ret.readlines()
        self.load_from_text(ret_lines)

    def show(self):
        """Print a human-readable summary of the drive."""
        ret = """Adapter ID: %s
Enclosure Device ID: %s
Slot Number: %s
Device Id: %s
Sequence Number: %s
Media Error Count: %s
Other Error Count: %s
Predictive Failure Count: %s
Last Predictive Failure Event Seq Number: %s
PD Type: %s
Raw Size: %s
Non Coerced Size: %s
Coerced Size: %s
Firmware state: %s
SAS Address(0): %s
Connected Port Number: %s
Inquiry Data: %s
FDE Capable: %s
FDE Enable: %s
Secured: %s
Locked: %s
Foreign State: %s
Device Speed: %s
Link Speed: %s
Media Type: %s""" % (self.adapter_id, self.enclosure_id, self.slot_id, self.device_id,
                     self.sequence_number, self.media_errors, self.other_errors,
                     self.predictive_failures,
                     self.last_predictive_seq_number,
                     self.pd_type, self.raw_size, self.non_coerced_size,
                     self.coerced_size,
                     self.firmware_state, self.sas_address, self.connected_port_number,
                     self.inquiry_data, self.fde_capable, self.fde_enable,
                     self.secured, self.locked, self.foreign_state, self.device_speed,
                     self.link_speed, self.media_type)
        print(ret)


class VirtualDrive:
    """One logical/virtual drive, parsed from `MegaCli -LDInfo` output."""
    virtualdisk_id = 0
    name = ''
    raid_level = ''
    size = ''
    state = ''
    stripe_size = ''
    number_of_drives = 0
    span_depth = 0
    default_cache_policy = ''
    current_cache_policy = ''
    access_policy = ''
    disk_cache_policy = ''
    encryption = ''

    def load_from_text(self, input):
        """Populate fields from one virtual drive's block of output lines."""
        for line in input:
            if line[0:12] == 'Virtual Disk':
                # Header looks like "Virtual Disk: 0 (Target Id: 0)".
                offset = line.find('(')
                self.virtualdisk_id = int(clean_nl(line[14:offset - 1]))
            if line[0:4] == 'Name':
                self.name = clean_nl(line[6:])
            elif line[0:10] == 'RAID Level':
                self.raid_level = clean_nl(line[12:])
            elif line[0:4] == 'Size':
                delim = line.find(' GB')
                self.size = clean_nl(line[5:delim])
            elif line[0:5] == 'State':
                self.state = clean_nl(line[7:])
            elif line[0:11] == 'Stripe Size':
                delim = line.find(' KB')
                self.stripe_size = clean_nl(line[13:delim])
            elif line[0:16] == 'Number Of Drives':
                self.number_of_drives = int(clean_nl(line[17:]))
            elif line[0:10] == 'Span Depth':
                self.span_depth = int(clean_nl(line[11:]))
            elif line[0:20] == 'Default Cache Policy':
                self.default_cache_policy = clean_nl(line[22:])
            elif line[0:20] == 'Current Cache Policy':
                self.current_cache_policy = clean_nl(line[22:])
            elif line[0:13] == 'Access Policy':
                self.access_policy = clean_nl(line[15:])
            elif line[0:17] == 'Disk Cache Policy':
                self.disk_cache_policy = clean_nl(line[19:])
            elif line[0:10] == 'Encryption':
                self.encryption = clean_nl(line[12:])

    def show(self):
        """Print a human-readable summary of the virtual drive."""
        ret = """Virtual Disk: %d
Name: %s
RAID Level: %s
Size: %s
State: %s
Strip Size: %s
Number Of Drives: %d
Span Depth: %d
Default Cache Policy: %s
Current Cache Policy: %s
Access Policy: %s
Disk Cache Policy: %s
Encryption: %s""" % (self.virtualdisk_id, self.name, self.raid_level,
                     self.size, self.state, self.stripe_size, self.number_of_drives,
                     self.span_depth, self.default_cache_policy, self.current_cache_policy,
                     self.access_policy, self.disk_cache_policy, self.encryption)
        print(ret)


def adp_list():
    """Return a list of Adapter objects, one per installed controller."""
    try:
        ret = megacli('-AdpCount -NoLog')
    except OSError:
        print('Failed to get adapter count (MegaCli -AdpCount)')
        return []
    adp_count = 0
    for line in ret.readlines():
        if line[0:16] == 'Controller Count':
            # Value is printed as e.g. "1." -- strip the trailing dot.
            adp_count = int(clean_nl(line[17:]).replace('.', ''))
    adp_list = []
    adp = Adapter()
    for adp_id in range(0, adp_count):
        adp.load(adp_id)
        adp_list.append(adp)
        adp = Adapter()
    return adp_list


def enc_list(adapter_id):
    """Return a list of Enclosure objects for one adapter."""
    try:
        ret = megacli('-EncInfo -a%i' % adapter_id)
    except OSError:
        print('Failed to get enclosure information (MegaCli -EncInfo)')
        return []
    ret_lines = ret.readlines()
    enc_list = []
    enc = Enclosure()
    # Go through all lines looking for the Enclosure identifier line
    for line in range(0, len(ret_lines)):
        if ret_lines[line][0:13] == '    Enclosure':
            # Feed the enclosure's block of text to the Enclosure object
            enc.load_from_text(ret_lines[line:line + 13])
            # Add Enclosure to the enc_list and reset it
            enc_list.append(enc)
            enc = Enclosure()
    return enc_list


def pd_list(adapter_id):
    """Return a list of PhysicalDevice objects for one adapter."""
    try:
        ret = megacli('-PdList -a%i' % adapter_id)
    except OSError:
        print('Failed to get physical device information (MegaCli -PdList)')
        return []
    ret_lines = ret.readlines()
    pd_list = []
    pd = PhysicalDevice()
    # Go through all lines looking for the first line in the disk info
    for line in range(0, len(ret_lines)):
        if ret_lines[line][0:19] == 'Enclosure Device ID':
            # Feed disk info to the PhysicalDevice object
            pd.load_from_text(ret_lines[line:line + 24])
            # Add PhysicalDevice to the pd_list and reset it
            pd_list.append(pd)
            pd = PhysicalDevice()
    return pd_list


def vd_list(adapter_id):
    """Return a list of VirtualDrive objects for one adapter."""
    try:
        ret = megacli('-LdInfo -Lall -a%i' % adapter_id)
    except OSError:
        print('Failed to get virtual drive information (MegaCli -LDInfo -Lall)')
        return []
    ret_lines = ret.readlines()
    vd_list = []
    vd = VirtualDrive()
    # Go through all lines looking for the Virtual Disk line
    for line in range(0, len(ret_lines)):
        if ret_lines[line][0:12] == 'Virtual Disk':
            # Feed the virtual drive's block of text to the VirtualDrive object
            vd.load_from_text(ret_lines[line:line + 13])
            # Add VirtualDrive to the vd_list and create a new one
            vd_list.append(vd)
            vd = VirtualDrive()
    return vd_list


def clean_nl(str):
    """Strip newline characters from a MegaCli output fragment."""
    return str.replace('\n', '')


def is_present(str):
    """True when a MegaCli component column reads exactly 'Present'."""
    if clean_nl(str) == 'Present':
        return True
    else:
        return False


def megacli(args):
    """Run MegaCli with `args` and return its stdout stream.

    NOTE(review): shell=True with string interpolation is only safe because
    every caller passes internally-built integer arguments; do not pass
    untrusted input here.
    """
    cmd = MEGACLI + ' ' + args
    out = Popen(cmd, shell=True, stdout=PIPE).stdout
    return out
UTF-8
Python
false
false
2,010
15,032,385,545,341
f3065e0565aa23cd7395124fc9103288ae40a691
634bc610b84b3af0d3280106910f050a653b2dc8
/setup.py
fe5a56d306ef15385319c830fde7ddeb5ab43136
[ "MIT" ]
permissive
mabotech/mabolab
https://github.com/mabotech/mabolab
37aec8d501880ebd398a63d099aebe3f3c46dd94
9d70781e438d5597cbb98e3ff3702658036262a3
refs/heads/master
2020-06-04T17:48:03.790904
2014-04-05T08:37:07
2014-04-05T08:37:07
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Packaging script for the mabolab library."""
import os
import sys

from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
# FIX: the original called open(...).read() and leaked both file handles
# until interpreter exit; context managers close them deterministically.
with open(os.path.join(here, 'README.txt')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'CHANGES.txt')) as changes_file:
    CHANGES = changes_file.read()

requires = [
    'simplejson',
    'SQLAlchemy',
    'zope.interface',
    'flask',
    'pyro',
]

setup(name='mabolab',
      version='0.0.1',
      description='mabolab',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Framework :: ",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
      ],
      author='MaboTech',
      license='MIT',
      author_email='[email protected]',
      url='http://www.mabotech.com',
      keywords='mabotech lab lib web',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='mabolab',
      install_requires=requires,
      #data_files=[]
      )
UTF-8
Python
false
false
2,014
4,947,802,333,073
71d3f222bb8b4123cbd5da9bb086934a54af9771
a0a6cdce9eea9700f9f6bc814eb1e87272db8c18
/ciem/apps/homepage/feeds.py
33598921d8f3154c58b1c7744bf3726b29dac453
[]
no_license
gustav0/ciem
https://github.com/gustav0/ciem
2c4e56d5e495798153dfbe543367a778e0a0672a
5add67c735313cdd4118793ebe46c840219f490b
refs/heads/master
2021-01-22T19:26:15.176098
2013-04-29T21:11:45
2013-04-29T21:11:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib.syndication.views import Feed

from ciem.apps.data.models import alimento


class archiveFeed(Feed):
    """Syndication feed exposing every `alimento` record under /archive/.

    NOTE(review): Django's Feed framework expects a `title` attribute;
    `nombre`/`item_nombre` are not standard Feed hooks -- confirm a custom
    feed generator consumes them before relying on this feed.
    """

    nombre = 'Archive Feed'
    description = 'Archive Feed'
    link = '/archive/'

    def items(self):
        """All alimento rows are included in the feed."""
        return alimento.objects.all()

    def item_link(self, item):
        """Every entry links back to the archive index page."""
        return '/archive/'

    def item_nombre(self, item):
        """Entry name comes from the model's `nombre` field."""
        return item.nombre

    def item_description(self, item):
        """Placeholder description for each entry."""
        return 'hola'
UTF-8
Python
false
false
2,013
4,690,104,309,711
5c5ff6236bd5ca183b8098be62f415a0615517ed
4654171674007e56fd5d56e8c4c99c0c35367f43
/py/auction/time_utils.py
b3e0ab528fe2383e6bd72e7e293ca5cdb7180c02
[]
no_license
pscollins/auction
https://github.com/pscollins/auction
3ec38932f80834b503216202194c0fe521b297e2
b531459c08b16fa1cc0ad177990a78a9668acff8
refs/heads/master
2021-01-18T18:18:36.069637
2014-08-06T07:19:12
2014-08-06T07:19:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import datetime from datetime import datetime as dt from dateutil import tz import time import calendar import re UTC_TZ = tz.gettz('UTC') NY_TZ = tz.gettz('America/New_York') CHI_TZ = tz.gettz('America/Chicago') LOCAL_TZ = tz.tzlocal() __SUBSECOND_RESOLUTION__ = 1000000 __DateRe__ = re.compile(r"(\d\d\d\d)(\d\d)(\d\d)") __CmeDateTimeRe__ = re.compile(r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{3,5})") def start_of_date(year, month, day, tzinfo): """ Given date information and timezone, create a timestamp """ result = dt(year, month, day, tzinfo=tzinfo).astimezone(UTC_TZ) return timestamp_from_datetime(result) def timestamp(): """ Get a current timestamp """ now = dt.utcnow() return calendar.timegm(now.utctimetuple())*__SUBSECOND_RESOLUTION__ + now.microsecond def datetime_from_timestamp(ts): """ Given a timestamp, create the corresponding datetime object """ return dt.fromtimestamp(float(ts)/__SUBSECOND_RESOLUTION__, UTC_TZ) def timestamp_from_datetime(dt): """ Given a datetime in utc, create the corresponding timestamp """ return calendar.timegm(dt.utctimetuple())*__SUBSECOND_RESOLUTION__ + dt.microsecond def timestamp_from_mtime(mt): """ Given a file mtime, create the corresponding timestamp """ return int(mt*__SUBSECOND_RESOLUTION__) def datetime_from_cme_timestamp(ts_str): m = __CmeDateTimeRe__.match(ts_str) year, mon, day, hr, minutes, sec, millis = m.groups() len_millis = len(millis) if len_millis == 3: micros = int(millis) * 1000 elif len_millis == 5: micros = int(millis) * 10 elif len_millis == 4: micros = int(millis) * 100 return datetime.datetime(int(year),int(mon),int(day), int(hr),int(minutes),int(sec), micros) def timestamp_from_cme_timestamp(ts_str): return timestamp_from_datetime(datetime_from_cme_timestamp(ts_str)) def chicago_time(ts): """ Given a timestamp (as utc), get the corresponding chicago time """ stamp = dt.fromtimestamp(float(ts)/__SUBSECOND_RESOLUTION__, UTC_TZ) return stamp.astimezone(CHI_TZ) def chicago_time_str(ts): return 
chicago_time(ts).strftime('%H:%M:%S.%f') if ts else 'Not Set' def get_date_of_file(fileName): """ Given a filename with a date in it (YYYYMMDD), parse out the date Return None if no date present """ m = __DateRe__.search(fileName) if m: year, month, day = m.groups() return datetime.date(int(year), int(month), int(day)) else: return None def get_date_string(d): return datetime.date.isoformat(d).replace('-','') Monday = 0 Tuesday = 1 Wednesday = 2 Thursday = 3 Friday = 4 Saturday = 5 Sunday = 6 def get_previous_weekday(some_date, desired_day = Sunday): weekday = some_date.weekday() days_back = (desired_day - weekday - 7) if (desired_day > weekday) else (weekday - desired_day) return some_date + datetime.timedelta(days_back) if __name__ == "__main__": def make_timestamp(sod, seconds, millis): seconds = int(seconds) millis = int(millis) return sod + seconds*1000000 + millis*1000 def grab_time(): for i in range(10000): ts = timestamp() import sys now = timestamp() print timestamp(), timestamp(), timestamp() print "now is ", datetime_from_timestamp(now) print "chicago time is ", chicago_time(now).strftime('%H:%M:%S:%f') for i in range(20): t = now + i * (60*60*24*356*__SUBSECOND_RESOLUTION__) print "chicago time:", chicago_time(t), " => ", int(t), " vs ", sys.maxint grab_time() print chicago_time(1311325200095000) # sod = timestamp_from_datetime(datetime(2011, 7, 22)) print "CH sod", start_of_date(2011, 7, 22, CHI_TZ) print "NY sod", start_of_date(2011, 7, 22, NY_TZ) print "CH", chicago_time(start_of_date(2011, 7, 22, CHI_TZ)) print "NY", chicago_time(start_of_date(2011, 7, 22, NY_TZ)) dtnow = dt.utcnow() print (calendar.timegm(dtnow.utctimetuple())*__SUBSECOND_RESOLUTION__), "vs", dtnow.microsecond print (calendar.timegm(dtnow.utctimetuple())*__SUBSECOND_RESOLUTION__ + dtnow.microsecond), "vs", dtnow.microsecond print chicago_time(1311321600730000) print "Sample timestamp", datetime_from_cme_timestamp('20120213183040306'), print "in chicago", 
chicago_time(timestamp_from_datetime(datetime_from_cme_timestamp('20120213183040306'))) print "UTC sod", make_timestamp(start_of_date(2011, 7, 22, CHI_TZ), 28800, 741) print "CH sod", chicago_time(make_timestamp(start_of_date(2011, 7, 22, CHI_TZ), 28800, 741)) print "UTC sod", make_timestamp(start_of_date(2011, 7, 22, NY_TZ), 14400, 730) print "CH sod", chicago_time(make_timestamp(start_of_date(2011, 7, 22, NY_TZ), 14400, 730)) some_day = datetime.date(2001,1,1) print some_day, some_day.weekday() print get_previous_weekday(some_day, Saturday) print get_date_string(some_day) print chicago_time(timestamp_from_cme_timestamp("20081223195210641")) print chicago_time(timestamp_from_cme_timestamp("2011102813300000175")) print chicago_time(timestamp_from_cme_timestamp("201110281330000017")) print chicago_time_str(timestamp_from_cme_timestamp("20081223195210641")) print chicago_time_str(timestamp_from_cme_timestamp("2011102813300000175")) print chicago_time_str(timestamp_from_cme_timestamp("20111028032135310")) print chicago_time_str(timestamp_from_cme_timestamp("201110281330000017"))
UTF-8
Python
false
false
2,014
1,486,058,723,517
cd32a518713f3c2b8e1bfb58077ade58d7df3ebc
0264c852a317b4fd7888af59e5a42103ea8561d0
/apps/words/words/__init__.py
9b4985db2a966bc0d22fcb42c07bd824dcc37794
[]
no_license
yimiqisan/qisan
https://github.com/yimiqisan/qisan
9f54998b0f33993ce248da4e6ded121c8ec7d038
8c0f70b23af0e6135e168f85bda5fbee4dffda1e
refs/heads/master
2020-04-05T23:17:30.103940
2013-06-20T09:57:53
2013-06-20T09:57:53
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py

Created by Liu Zhiyong (刘智勇) on 2013-06-16.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
from flask import Flask

from views import blueprint_site, blueprint_apis, blueprint_weixin

# Single module-level application object; blueprints carve it into the
# site, API, and WeChat feature areas.
app = Flask(__name__)

# Registration order matches the original explicit calls.
for _blueprint in (blueprint_site, blueprint_apis, blueprint_weixin):
    app.register_blueprint(_blueprint)
UTF-8
Python
false
false
2,013
3,178,275,808,409
775db518a29ab29cd51f07806d4ce514db4218a1
b1e7181dd2c7379e60ab1fdff954e68b597c6bfb
/test_python_client.py
21302fe42227baf50a98c56fad9a13b4d4492959
[ "MIT" ]
permissive
zed-throben/erlang-bertrpc-server
https://github.com/zed-throben/erlang-bertrpc-server
68a34cefde47ca4b847ffd5d1469d274245e970d
82e6456e6277e0677322ae4900079c8b1bc6e81d
refs/heads/master
2020-06-15T06:53:35.612505
2014-07-13T22:02:46
2014-07-13T22:02:46
20,970,686
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# easy_install bertrpc
import bertrpc

# Connect to the local BERT-RPC endpoint and ask the remote `lists` module
# to reverse a small list; prints the reply as a smoke test.
service = bertrpc.Service('127.0.0.1', 9999)
reversed_list = service.request('call').lists.reverse([1, 2, 3])
print(reversed_list)
UTF-8
Python
false
false
2,014
10,969,346,509,728
ed338cb090b88bab449a47294b435ded9515ffbf
81fc357f0849890c8ee3232d1202e6b8e1a59f64
/autobt/manager.py
0cc31f6c33232d31b8b473273a6e53cc0fda7a4a
[]
no_license
patrick-lu/auto
https://github.com/patrick-lu/auto
a3f436818518e38d956ed3a1adf36d140c993276
bfd58c89c0c15428f066f630fcee294b10cd3c01
refs/heads/master
2020-06-06T04:14:04.343237
2012-07-05T14:04:15
2012-07-05T14:04:15
4,414,756
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
"""Smoke-test client: send one start_work command over ZeroMQ REQ/REP."""
import zmq

host = "127.0.0.1:6000"

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://" + host)

#for i in range(10):
# FIX: the original assigned msg = {'cmd': 'start_reply'} here and then
# immediately overwrote it -- a dead store; kept as a comment for reference.
#msg = {'cmd': 'start_reply'}
msg = {'cmd': 'start_work'}
#msg = json.dumps(msg)
socket.send_json(msg)
msg_in = socket.recv()
UTF-8
Python
false
false
2,012
7,413,113,602,533
4efd226e067cf648f2f622f19c8540092e5f2a85
28b13e5fff19c1fb83bc0e9ec9b00651241afc2e
/openprofile/objects/__init__.py
74526b90f73b137da3f3c86cdd9d5fbb3b64076b
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-unknown-license-reference", "CC-BY-NC-ND-3.0" ]
non_permissive
koalalorenzo/OpenProfile
https://github.com/koalalorenzo/OpenProfile
7a69aae26e936649a91ffee8339b69fda56a6a2d
a292f0c56b1d90353eff93e825d7d2346f73c6ec
refs/heads/master
2021-01-18T14:34:01.730900
2013-03-09T20:33:16
2013-03-09T20:33:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from openprofile.objects.Connection import Connection from openprofile.objects.Page import Page, PageNotFound from openprofile.objects.Message import Message from openprofile.objects.Profile import Profile, AdminNotFound
UTF-8
Python
false
false
2,013
11,570,641,906,287
965a1951c45593d4c8ccdf03de6dab98b1a2cd86
49013a6493b803af396f6dd7645a360d2807e80b
/src/neuroutils/__init__.py
0e2b5bacf6ed2e23a77db43a63e4a1f36b8df236
[]
no_license
lixiaolong19890207/neuroutils
https://github.com/lixiaolong19890207/neuroutils
2c1caf5b52283fe7652fa7aa2a158378aa69b620
85151c30ad43745352c6dc641707d42b867a5adf
refs/heads/master
2021-05-28T08:25:06.613585
2012-02-27T12:24:51
2012-02-27T12:24:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from nipype.utils.config import config
import matplotlib
# Pick the backend before any pyplot import happens downstream.
matplotlib.use(config.get("execution", "matplotlib_backend"))
from vis import Overlay, PsMerge, Ps2Pdf, PlotRealignemntParameters
from threshold import ThresholdGGMM, CreateTopoFDRwithGGMM, ThresholdGMM, ThresholdFDR
from simgen import SimulationGenerator
from resampling import CalculateNonParametricFWEThreshold, CalculateProbabilityFromSamples, CalculateFDRQMap
from bootstrapping import BootstrapTimeSeries, PermuteTimeSeries
from bedpostx_particle_reader import Particle2Trackvis
from annotate_tracks import AnnotateTracts
from icc import ICC

import numpy as np


def estimate_fdr_and_fnr(true_pattern, exp_result):
    """Compare a detected activation map against ground truth.

    Voxels are "positive" when nonzero.  Returns a (fdr, fnr) pair; either
    rate is 0 when its denominator would be empty.

    NOTE(review): the second value divides false negatives by the number of
    *predicted* negatives, which is the false omission rate rather than the
    textbook FNR (FN / actual positives) -- confirm this is intended.
    """
    false_positives = sum(exp_result[true_pattern != 1] != 0)
    false_negatives = sum(exp_result[true_pattern != 0] == 0)
    all_positives = np.sum(exp_result != 0)
    all_negatives = np.sum(exp_result == 0)

    fdr = 0 if all_positives == 0 else float(false_positives) / float(all_positives)
    fnr = 0 if all_negatives == 0 else float(false_negatives) / float(all_negatives)
    return (fdr, fnr)
UTF-8
Python
false
false
2,012
19,473,381,757,303
5d6d7e65c22043998ad7c0d233b4276525d4c115
8636807e06d87e4a190edd8e3e6f701b844cfce4
/turkey.py
3252d435aa0d08bba5e57a1edc4ef6b1bdb2a9ec
[]
no_license
dbenamy/agent-plugins
https://github.com/dbenamy/agent-plugins
5c3c6eb79e77c6d683ed3174bdb532528a97e89d
5b8a8c50ed4c11e6403f08e106adb8ef52b4572e
refs/heads/master
2021-01-15T17:20:56.846554
2011-02-14T18:17:53
2011-02-14T18:17:53
1,131,027
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
#
#          _<_< >
# ____________/_/ _/
# _/ turkey.py _/ /_
# / custom / \ >
# ( plugin \_____//
# \________________/
#
# First used with a Backwoods smoker to monitor
# the temp of a turkey during cook time. You will
# need to adjust the temp thresholds to match your
# specific cooking requirements. Enjoy!
#
# ~Team Cloudkick, Thanksgiving 2010
import time
import struct
import sys

# FIX: open (not the py2-only file()) in binary mode -- struct.unpack needs
# raw bytes -- and close the handle, which the original leaked.
ldusb = open("/dev/ldusb0", "rb")
try:
    time.sleep(0.5)
    # This reads the payload off of the Go!Temp USB drive
    pkt = ldusb.read(8)
finally:
    ldusb.close()

# Packet layout: sample count, sequence number, then three 16-bit samples.
parsed_pkt = list(struct.unpack("<BBHHH", pkt))
num_samples = parsed_pkt.pop(0)
seqno = parsed_pkt.pop(0)

for sample in range(num_samples):
    # Raw units are 1/128 degree Celsius.
    c = parsed_pkt[sample] / 128.0
    # Convert to Fahrenheit since this is for Thanksgiving
    f = 9.0 / 5.0 * c + 32
    # This is the actual alerting threshold,
    # tweak as needed
    if f > 200 and f < 300:
        status = 'ok'
    else:
        status = 'err'
    print('status %s temp at %d' % (status, f))
    print('metric temp int %d' % (f))
UTF-8
Python
false
false
2,011
4,054,449,131,639
1664d68bdf35734e13e114e8edfd50d290054319
8b9f896d8b9c457dcbb1032659c15717385c6711
/pyarff/arff/ArffFile.py
d3be7584008e896212808337382afa6e7267cfa2
[]
no_license
nybblr/pyarff
https://github.com/nybblr/pyarff
bac42c7dace25ffbf82f657970e5733006a17a85
ae1f7b0fd0d4d82fea17ace8192717062d578149
refs/heads/master
2021-01-20T03:34:21.844169
2013-03-05T02:32:58
2013-03-05T02:32:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Missing-value strategies accepted by ArffFile.handleMissingValues.
# BUG FIX: the original referenced DISCARD_RECORD and MOST_PROBABLE without
# defining them anywhere in this module, so handleMissingValues raised
# NameError on every call.  The concrete values are arbitrary sentinels --
# NOTE(review): confirm against any external callers that define their own.
DISCARD_RECORD = 'discard_record'
MOST_PROBABLE = 'most_probable'


class ArffAttribute(object):
    """
    Name can be any alphanumerical value
    DataType can be any of STRING, NUMERIC or NOMINAL
    """
    def __init__(self, index, name, dataType):
        self.index = index
        self.name = name
        self.isContinuous = False
        self.dataType = dataType.strip()
        self.values = None          # nominal value list, None otherwise
        self.valuesCount = {}       # value -> occurrence count
        self.missing = 0
        self.mean = 0
        self.median = 0
        self.mode = None
        self.sum = 0
        self.min = None
        self.max = None
        self.stdDev = 0
        self.nbrOfBins = 0
        if self.dataType in ["NUMERIC", "INTEGER", "REAL"]:
            self.isContinuous = True
        elif dataType not in ["NUMERIC", "INTEGER", "REAL", "STRING"]:
            # Nominal attributes arrive as "{a, b, c}".
            self.values = self.dataType[1:-1].split(",")
            self.nbrOfBins = len(self.values)
            self.dataType = "NOMINAL"
            for i in range(self.nbrOfBins):
                self.values[i] = self.values[i].strip()

    def getValues(self):
        """Return the distinct values seen so far.

        FIX: the original iterated dict.iterkeys(), which does not exist on
        Python 3; list(keys()) is equivalent on both 2 and 3.
        """
        return list(self.valuesCount.keys())

    def __str__(self):
        # FIX: corrected the "IsContinuos" typo in the summary line.
        output = "Name: %s, DataType: %s, IsContinuous: %d, Missing values: %d" % (
            self.name, self.dataType, self.isContinuous, self.missing)
        if self.isContinuous == 1:
            output += "\n\tMean: %.3f" % self.mean
            output += "\n\tMedian: %.3f" % self.median
            output += "\n\tMax: %.3f" % self.max
            output += "\n\tMin: %.3f" % self.min
        else:
            output += ", Mode: %s" % self.mode
            for key, value in self.valuesCount.items():
                output += "\n\tValue %s: %4d" % (key, value)
        return output


class ArffRecord(object):
    """One data row; values are stored as a plain list of strings."""
    def __init__(self, values):
        self.values = []
        if isinstance(values, str):
            # Raw ARFF data line: comma-separated fields.
            self.values = values.strip().split(",")
        else:
            self.values = values

    def __str__(self):
        return str(self.values)


class ArffFile(object):
    """Parsed ARFF dataset: relation name, attribute schema, and records."""
    def __init__(self):
        self.relationName = ""
        self.records = []
        self.attributes = []
        self.countOfAttributes = 0
        self.countOfRecords = 0

    def getNameForAttribute(self, index):
        """Attribute name at positional `index` (IndexError if out of range)."""
        return self.attributes[index].name

    def getIndexForAttribute(self, name):
        """Column index of the attribute called `name`, or -1 if absent."""
        for attr in self.attributes:
            if attr.name == name:
                return attr.index
        return -1

    def handleMissingValues(self, strategy):
        """Resolve '?' cells using one of the module-level strategies.

        DISCARD_RECORD drops any record with a missing value; MOST_PROBABLE
        substitutes the attribute's median (continuous) or mode (nominal).
        Raises Exception for an unknown strategy.
        """
        if strategy == DISCARD_RECORD:
            originalRecords = self.records  # keep a copy
            self.records = []  # clear array
            for record in originalRecords:
                discard = False
                for attr in self.attributes:
                    if record.values[attr.index] == "?":
                        discard = True
                if not discard:
                    self.records.append(record)
        elif strategy == MOST_PROBABLE:
            for record in self.records:
                for attr in self.attributes:
                    if record.values[attr.index] == "?":
                        if attr.isContinuous:
                            record.values[attr.index] = attr.median
                        else:
                            record.values[attr.index] = attr.mode
        else:
            raise Exception("Unknown strategy")
UTF-8
Python
false
false
2,013