from bs4 import BeautifulSoup\nimport urllib.request\n\nwith open(\"example.html\") as fp:\n soup = BeautifulSoup(fp, 'html.parser')\n\nsoup\nprint(soup.prettify())
[prettify() output of example.html: the HTML tags were stripped when this notebook was exported. The surviving text nodes are the title "Page Title", a heading "Heading 1", a paragraph "Paragraph", the link texts "google" and "naver", the short strings "a", "b", "c", "g", and the text "Example page".]
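For reference, a minimal example.html consistent with the tags, ids, classes and link texts used by the selectors below might look like the sketch here. The original file was not preserved in the export, so the exact nesting, the href values, and which text sits inside which div are assumptions rather than the original markup.

```html
<!-- Approximate reconstruction (assumption): only the surviving text nodes and the
     selectors used below (title, h1, p, a, div#id1, div.class1) come from the
     notebook; the nesting and href values are guesses. -->
<!doctype html>
<html>
 <head>
  <title>Page Title</title>
 </head>
 <body>
  <h1>Heading 1</h1>
  <p>Paragraph</p>
  <a href="https://www.google.com">google</a>
  <div id="id1">
   a
   <a href="https://www.naver.com">naver</a>
   b
  </div>
  <div class="class1">
   c
   <a href="https://www.example.com">Example page</a>
   g
  </div>
 </body>
</html>
```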
\n \n\n### HTML 태그 파싱soup.title\nsoup.title.name #title 태그 네임\nsoup.title.string #title 태그 속 string\nsoup.title.parent.name #title의 parent 태그 네임\nsoup.h1\nsoup.p #가장 처음 p 태그\nsoup.div #가장 처음 div 태그\nsoup.a### HTML 태그 검색* `find()`: 해당 조건에 맞는 하나의 태그를 가져옴\n* `find_all()`: 해당 조건에 맞는 모든 태그를 가져옴\n* `select()`: CSS 선택자와 같은 형식으로 선택 가능soup_find = soup.find(\"div\")\nprint(soup_find)\nsoup_find_all = soup.find_all(\"div\")\nprint(soup_find_all) #리스트 형태로 반환\nfind_by_id = soup.find_all('div', {'id':'id1'}) # {}: 속성값\nprint(find_by_id) #리스트 형태로 반환\nfind_by_class = soup.find_all('div', {'class':'class1'})\nprint(find_by_class)\nsoup.find('a').get('href') #첫 번째 a 태그의 href 속성값\nsoup.find('a').get_text()\nsite_names = soup.find_all('a')\nfor name in site_names:\n print(name.get_text())\nid1 = soup.select('div#id1') #css처럼, id는 '#'으로, class는 '.'으로 접근\nid1\nclass1 = soup.select('div.class1')\nclass1\nclass1_a = soup.select('div.class1 a') #또는 'div.class1 > a'\nclass1_a## 웹페이지 콘텐츠 가져오기%%writefile anthem.html\n\n\n \n \n \n
<!-- reconstruction: the wrapping tags were stripped on export; the p elements and
     their id values are inferred from the find() calls below, the verse text is unchanged -->
  <p id="title">애국가</p>
  <p id="content">
   동해물과 백두산이 마르고 닳도록 하느님이 보우하사 우리나라 만세.
   무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.
  </p>
  <p id="content">
   남산 위에 저 소나무, 철갑을 두른 듯 바람 서리 불변함은 우리 기상일세.
   무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.
  </p>
  <p id="content">
   가을 하늘 공활한데 높고 구름 없이 밝은 달은 우리 가슴 일편단심일세.
   무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.
  </p>
  <p id="content">
   이 기상과 이 맘으로 충성을 다하여 괴로우나 즐거우나 나라 사랑하세.
   무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.
  </p>
\n \n\nwith open(\"anthem.html\") as fp:\n soup = BeautifulSoup(fp, 'html.parser')\n\nsoup\ntitle = soup.find('p', {'id':'title'})\ncontents = soup.find_all('p', {'id':'content'})\n\nprint(title.get_text())\nfor content in contents:\n print(content.get_text())애국가\n\n 동해물과 백두산이 마르고 닳도록 하느님이 보우하사 우리나라 만세.\n 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.\n\n\n 남산 위에 저 소나무, 철갑을 두른 듯 바람 서리 불변함은 우리 기상일세.\n 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.\n\n\n 가을 하늘 공활한데 높고 구름 없이 밝은 달은 우리 가슴 일편단심일세.\n 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.\n\n\n 이 기상과 이 맘으로 충성을 다하여 괴로우나 즐거우나 나라 사랑하세.\n 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.\n\n## 인터넷 웹페이지 가져오기url = \"http://suanlab.com\"\nhtml = urllib.request.urlopen(url).read() #해당 url의 웹페이지에 대한 html 문서 읽어옴\nsoup = BeautifulSoup(html, 'html.parser')\nsoup\nlabels = soup.find_all(['label'])\nfor label in labels:\n print(label.get_text())\nlabels = soup.select('#wrapper > section > div > div > div > div > div > label') #구조선택자 관련된 부분은 지워줌 ex) div:nth-child(1) -> div\nfor label in labels:\n print(label.get_text())[2020-05-20] \"인공지능의 보안 위협\" 칼럼\n[2020-03-04] \"데이터 경제 시대\" 칼럼\n[2019-12-25] \"마이데이터 시대의 도래 데이터 주권과 새로운 가치\" 칼럼\n[2019-09-25] \"유튜브 탄생과 크리에이터 시대\" 칼럼\n[2019-09-04] \"농업으로 들어간 인공지능\" 칼럼\n[2019-08-07] \"AI시대 지배할 것인가 지배당하며 살 것인가\" 칼럼\n[2018-12-30] \"파이썬으로 텍스트 분석하기\" 책 출판\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/BS_BeautyfulSoup_기초.ipynb"},"repo_name":{"kind":"string","value":"howecofe/Web-Data-Programming"},"chain_length":{"kind":"number","value":5,"string":"5"}}},{"rowIdx":4874,"cells":{"content":{"kind":"string","value":"# Fully Connected Neural Network\n\nNotebook inspired by https://github.com/aymericdamien/TensorFlow-Examples/\n\nExample is using the [MNIST database of handwritten digits](http://yann.lecun.com/exdb/mnist/)# Import MINST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nimport tensorflow as tf\n# Parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\ndisplay_step = 1\n\n# Network Parameters\nn_hidden = 256\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\n\ntf.reset_default_graph()\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])## Exercise 1\n\nDefine a function that builds a fully connected neural network. You will need to complete these steps:\n\n1. define 4 `tf.Variable` with the appropriate shapes for W, b, W_out, b_out. 
Initialize them with random values.\n\n- define a super simple network with 1 layer that performs the operation:\n\n relu(x * W + b)\n\n- define an output layer that performs the operation:\n\n softmax(x * W_out + b_out)\n\n- encapsulate these in a function called `dnn` that takes `x` as input and returns the output layer# Create model\ndef dnn(x, n_hidden_1):\n # your code here\n# Construct model\npred = dnn(x, n_hidden)What does the graph look like for this network?g = tf.get_default_graph()\n[op.name for op in g.get_operations()]\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n y: batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if epoch % display_step == 0:\n print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost)\n print \"Optimization Finished!\"\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n # Calculate accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})## Exercise 2\n\n1. 
modify the `dnn` function adding a second hidden layer also with `relu` activation# Create model\ndef dnn_2(x, n_hidden_1, n_hidden_2):\n # your code here\npred = dnn_2(x, 512, 256)\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\ninit = tf.global_variables_initializer()\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n y: batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if epoch % display_step == 0:\n print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost)\n print \"Optimization Finished!\"\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n # Calculate accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/advanced_deep_learning/1d_fully_connected.ipynb"},"repo_name":{"kind":"string","value":"marionleborgne/ml"},"chain_length":{"kind":"number","value":4,"string":"4"}}},{"rowIdx":4875,"cells":{"content":{"kind":"string","value":"# List of Problems \n\n\n\n- [Problem midpoint](#problem_midpoint)\n\n- [Problem tableau](#problem_tableau)\n\n- [Problem Runge Kutta4](#problem_rk4)\n\n- [Problem embedded](#problem_embedded)\n\n- [Problem coding A](#prob_a)\n\n- [Problem coding B](#prob_b)\n\n- [Problem coding C](#prob_c)\n\n\n# Objectives\nIn this lab, you will explore Runge-Kutta methods for solving ordinary\ndifferential equations. The goal is to gain a better understanding of\nsome of the more popular Runge-Kutta methods and the corresponding\nnumerical code.\n\nSpecifically you will be able to:\n\n- describe the mid-point method\n\n- construct a Runge-Kutta tableau from equations or equations from a\n tableau\n\n- describe how a Runge-Kutta method estimates truncation error\n\n- edit a working Octave code to use a different method or solve a\n different problem# Readings\n\n\nThere is no required reading for this lab, beyond the contents of the\nlab itself. However, if you would like additional background on any of\nthe following topics, then refer to the sections indicated below.\n\n**Runge-Kutta Methods:**\n\n - Newman, Chapter 8\n\n - Press, et al.  Section 16.1\n\n - Burden & Faires  Section 5.4\n # Solving Ordinary Differential Equations with the Runge-Kutta Methods \n\nOrdinary differential equations (ODEs) arise in many physical\nsituations. For example, there is the first-order Newton cooling\nequation discussed in , and perhaps the most famous equation of all, the\nsecond-order Newton’s Second Law of Mechanics $F=ma$ .\n\nIn general, higher-order equations, such as Newton’s force equation, can\nbe rewritten as a system of first-order equations . 
So the generic\nproblem in ODEs is a set of N coupled first-order differential equations\nof the form, \n\n$$\n \\frac{d{\\bf y}}{dt} = f({\\bf y},t)\n$$ \n \nwhere ${\\bf y}$ is a vector of\nvariables.\n\nFor a complete specification of the solution, boundary conditions for\nthe problem must be given. Typically, the problems are broken up into\ntwo classes:\n\n- **Initial Value Problem (IVP)**: the initial values of\n ${\\bf y}$ are specified.\n\n- **Boundary Value Problem (BVP)**: ${\\bf y}$ is\n specified at the initial and final times.\n\nFor this lab, we are concerned with the IVP’s. BVP’s tend to be much\nmore difficult to solve and involve techniques which will not be dealt\nwith in this set of labs.\n\nNow as was pointed out in , in general, it will not be possible to find\nexact, analytic solutions to the ODE. However, it is possible to find an\napproximate solution with a finite difference scheme such as the forward\nEuler method . This is a simple first-order, one-step scheme which is\neasy to implement. However, this method is rarely used in practice as it\nis neither very stable nor accurate.\n\nThe higher-order Taylor methods discussed in are one alternative but\ninvolve higher-order derivatives that must be calculated by hand or\nworked out numerically in a multi-step scheme. Like the forward Euler\nmethod, stability is a concern.\n\nThe Runge-Kutta methods are higher-order, one-step schemes that makes\nuse of information at different *stages* between the\nbeginning and end of a step. They are more stable and accurate than the\nforward Euler method and are still relatively simple compared to schemes\nsuch as the multi-step predictor-corrector methods or the Bulirsch-Stoer\nmethod. Though they lack the accuracy and efficiency of these more\nsophisticated schemes, they are still powerful methods that almost\nalways succeed for non-stiff IVPs.# The Midpoint Method: A Two-Stage Runge-Kutta Method \n\nThe forward Euler method takes the solution at time $t_n$ and advances\nit to time $t_{n+1}$ using the value of the derivative $f(y_n,t_n)$ at\ntime $t_n$ \n\n$$y_{n+1} = y_n + h f(y_n,t_n)$$ \n\nwhere $h \\equiv \\Delta t$.from IPython.display import Image\nImage(filename=\"images/euler.png\")Figure euler: The forward Euler method is essentially a straight-line approximation\nto the solution, over the interval of one step, using the derivative at\nthe starting point as the slope. \n\nThe idea of the Runge-Kutta schemes is to take advantage of derivative\ninformation at the times between $t_n$ and $t_{n+1}$ to increase the\norder of accuracy.\n\nFor example, in the midpoint method, the derivative at the initial time\nis used to approximate the derivative at the midpoint of the interval,\n$f(y_n+\\frac{1}{2}hf(y_n,t_n), t_n+\\frac{1}{2}h)$. The derivative at the\nmidpoint is then used to advance the solution to the next step. The\nmethod can be written in two *stages* $k_i$,\n\n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+\\frac{1}{2}k_1, t_n+\\frac{1}{2}h)\\ \\ \\rm{eq: midpoint}\\\\\n y_{n+1} = y_n + k_2\n \\end{array}\n\\end{aligned}\n$$ \n\nThe midpoint method is known\nas a 2-stage Runge-Kutta formula.\n\nImage(filename='images/midpoint.png')Figure midpoint: The midpoint method again uses the derivative at the starting point to\napproximate the solution at the midpoint. 
The derivative at the midpoint\nis then used as the slope of the straight-line approximation.# Second-Order Runge-Kutta Methods\n\nAs was shown in lab 2 , the error in the forward Euler method is\nproportional to $h$. In other words, the forward Euler method has an\naccuracy which is *first order* in $h$.\n\nThe advantage of the midpoint method is that the extra derivative\ninformation at the midpoint results in the first order error term\ncancelling out, making the method *second order* accurate.\nThis can be shown by a Taylor expansion of equation\n[eq: midpoint](#eq_midpoint)\n\n\n**Problem midpoint**: Even though the midpoint method is second-order\naccurate, it may still be less accurate than the forward Euler method.\nIn the demo below, compare the accuracy of the two methods on the\ninitial value problem \n\n$$\n \\frac{dy}{dt} = -y +t +1, \\;\\;\\;\\; y(0) =1\\ \\ \\textbf{eq: linexp}\n$$ \n\nwhich has the exact\nsolution \n$$\n y(t) = t + e^{-t}\n$$1. Why is it possible that the midpoint method may be less accurate\n than the forward Euler method, even though it is a higher order\n method?\n\n2. Based on the numerical solutions of (eq: linexp), which method\n appears more accurate?\n\n3. Cut the stepsize in half and check the error at a given time. Repeat\n a couple of more times. How does the error drop relative to the\n change in stepsize?\n\n4. How do the numerical solutions compare to $y(t) = t + e^{-t}$ when\n you change the initial time? Why?%matplotlib inline\nfrom numlabs.lab4.lab4_functions import initinter41,eulerinter41,midpointinter41\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ninitialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.25,'c1':-1.,'c2':1.,'c3':1.}\ncoeff = initinter41(initialVals)\ntimeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt)\nnsteps=len(timeVec)\nye=[]\nym=[]\ny=coeff.yinitial\nye.append(coeff.yinitial)\nym.append(coeff.yinitial)\nfor i in np.arange(1,nsteps):\n ynew=eulerinter41(coeff,y,timeVec[i-1])\n ye.append(ynew)\n ynew=midpointinter41(coeff,y,timeVec[i-1])\n ym.append(ynew)\n y=ynew\nanalytic=timeVec + np.exp(-timeVec)\ntheFig,theAx=plt.subplots(1,1)\nl1=theAx.plot(timeVec,analytic,'b-',label='analytic')\ntheAx.set_xlabel('time (seconds)')\nl2=theAx.plot(timeVec,ye,'r-',label='euler')\nl3=theAx.plot(timeVec,ym,'g-',label='midpoint')\ntheAx.legend(loc='best')\ntheAx.set_title('interactive 4.1')In general, an *explicit* 2-stage Runge-Kutta method can be\nwritten as, \n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+b_{21}k_1, t_n+a_2h)\\ \\ \\ \\textbf{eq: explicitrk2} \\\\\n y_{n+1} = y_n + c_1k_1 +c_2k_2\n \\end{array}\n\\end{aligned}\n$$\n \n The scheme is said to be\n*explicit* since a given stage does not depend\n*implicitly* on itself, as in the backward Euler method ,\nor on a later stage.\n\nOther explicit second-order schemes can be derived by comparing the\nformula [eq: explicitrk2](#eq: explicitrk2) to the second-order Taylor method and\nmatching terms to determine the coefficients $a_2$, $b_{21}$, $c_1$ and\n$c_2$.\n\nSee [Appendix midpoint](#app_midpoint) for the derivation of the midpoint\nmethod.## The Runge-Kutta Tableau \n\nA general s-stage Runge-Kutta method can be written as,\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_i = h f(y_n+ {\\displaystyle \\sum_{j=1}^{s} } b_{ij}k_j, t_n+a_ih), \n \\;\\;\\; i=1,..., s\\\\\n y_{n+1} = y_n + {\\displaystyle \\sum_{j=1}^{s}} c_jk_j \n \\end{array}\n \\end{aligned}\n$$\n\n\n\nAn *explicit* Runge-Kutta method has $b_{ij}=0$ 
for\n$i\\leq j$, i.e. a given stage $k_i$ does not depend on itself or a later\nstage $k_j$.\n\nThe coefficients can be expressed in a tabular form known as the\nRunge-Kutta tableau. \n\n$$\n\\begin{array}{|c|c|cccc|c|} \\hline\ni & a_i &{b_{ij}} & & && c_i \\\\ \\hline\n1 & a_1 & b_{11} & b_{12} & ... & b_{1s} & c_1\\\\\n2 & a_2 & b_{21} & b_{22} & ... & b_{2s} & c_2\\\\ \n\\vdots & \\vdots & \\vdots & \\vdots & & \\vdots & \\vdots\\\\\ns &a_s & b_{s1} & b_{s2} & ... & b_{ss} & c_s\\\\\\hline\n{j=} & & 1 \\ 2 & ... & s & \\\\ \\hline\n\\end{array}\n$$ \nAn explicit scheme will be strictly lower-triangular.\n\nFor example, a general 2-stage Runge-Kutta method, \n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n+b_{11}k_1+b_{12}k_2,t_n+a_1h)\\\\\n k_2 = h f(y_n+b_{21}k_1+b_{22}k_2, t_n+a_2h)\\\\\n y_{n+1} = y_n + c_1k_1 +c_2k_2\n \\end{array}\n \\end{aligned}\n $$ \n \n has the coefficients,\n\n$$\n\\begin{array}{|c|c|cc|c|} \\hline\ni & a_i & {b_{ij}} & & c_i \\\\ \\hline\n1 & a_1 & b_{11} & b_{12} & c_1\\\\\n2 & a_2 & b_{21} & b_{22} & c_2\\\\ \\hline\n{j=} & & 1 & 2 & \\\\ \\hline\n\\end{array}\n$$\n\n\n\nIn particular, the midpoint method is given by the tableau,\n\n$$\n\\begin{array}{|c|c|cc|c|} \\hline\ni & a_i & {b_{ij}} & & c_i \\\\ \\hline\n1 & 0 & 0 & 0 & 0\\\\\n2 & \\frac{1}{2} & \\frac{1}{2} & 0 & 1\\\\ \\hline\n{j=} & & 1 & 2 & \\\\ \\hline\n\\end{array}\n$$\n\n**Problem tableau**: Write out the tableau for\n\n1. [Heun’s method](#eq_heuns)\n\n2. the fourth-order Runge-Kutta method ([lab4:eq:rk4]) discussed in the\n next section.\n\n## Explicit Fourth-Order Runge-Kutta Method \n\n\n\n\nExplicit Runge-Kutta methods are popular as each stage can be calculated\nwith one function evaluation. In contrast, implicit Runge-Kutta methods\nusually involves solving a non-linear system of equations in order to\nevaluate the stages. As a result, explicit schemes are much less\nexpensive to implement than implicit schemes.\n\nHowever, there are cases in which implicit schemes are necessary and\nthat is in the case of *stiff* sets of equations. See\nsection 16.6 of Press et al. for a discussion. For these labs, we will\nfocus on non-stiff equations and on explicit Runge-Kutta methods.\n\nThe higher-order Runge-Kutta methods can be derived by in manner similar\nto the midpoint formula. An s-stage method is compared to a Taylor\nmethod and the terms are matched up to the desired order.\n\nMethods of order $M > 4$ require $M+1$ or $M+2$ function evaluations or\nstages, in the case of explicit Runge-Kutta methods. As a result,\nfourth-order Runge-Kutta methods have achieved great popularity over the\nyears as they require only four function evaluations per step. In\nparticular, there is the classic fourth-order Runge-Kutta formula:\n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+\\frac{k_1}{2}, t_n+\\frac{h}{2})\\\\\n k_3 = h f(y_n+\\frac{k_2}{2}, t_n+\\frac{h}{2})\\\\\n k_4 = h f(y_n+k_3, t_n+h)\\\\\n y_{n+1} = y_n + \\frac{k_1}{6}+ \\frac{k_2}{3}+ \\frac{k_3}{3} + \\frac{k_4}{6}\n \\end{array}\n \\end{aligned} \n$$\n\n\n\n**Problem rk4**: In the demo below, compare compare solutions to the test\nproblem (eq: test)\n\n$$\n\\frac{dy}{dt} = -y +t +1, \\;\\;\\;\\; y(0) =1\\ \\ \\ \\mathbf{eq: test}\n$$ \n\ngenerated with the\nfourth-order Runge-Kutta method to solutions generated by the forward\nEuler and midpoint methods.\n\n1. Based on the numerical solutions of (eq: test), which of the\n three methods appears more accurate?\n\n2. 
Again determine how the error changes relative to the change in\n stepsize, as the stepsize is halved.from numlabs.lab4.lab4_functions import initinter41,eulerinter41,midpointinter41,\\\n rk4ODEinter41\ninitialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.05,'c1':-1.,'c2':1.,'c3':1.}\ncoeff = initinter41(initialVals)\ntimeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt)\nnsteps=len(timeVec)\nye=[]\nym=[]\nyrk=[]\ny=coeff.yinitial\nye.append(coeff.yinitial)\nym.append(coeff.yinitial)\nyrk.append(coeff.yinitial)\nfor i in np.arange(1,nsteps):\n ynew=eulerinter41(coeff,y,timeVec[i-1])\n ye.append(ynew)\n ynew=midpointinter41(coeff,y,timeVec[i-1])\n ym.append(ynew)\n ynew=rk4ODEinter41(coeff,y,timeVec[i-1])\n yrk.append(ynew)\n y=ynew\nanalytic=timeVec + np.exp(-timeVec)\ntheFig=plt.figure(0)\ntheFig.clf()\ntheAx=theFig.add_subplot(111)\nl1=theAx.plot(timeVec,analytic,'b-',label='analytic')\ntheAx.set_xlabel('time (seconds)')\nl2=theAx.plot(timeVec,ye,'r-',label='euler')\nl3=theAx.plot(timeVec,ym,'g-',label='midpoint')\nl4=theAx.plot(timeVec,yrk,'m-',label='rk4')\ntheAx.legend(loc='best')\ntheAx.set_title('interactive 4.2')## Embedded Runge-Kutta Methods: Estimate of the Truncation Error \n\n\n\nIt is possible to find two methods of different order which share the\nsame stages $k_i$ and differ only in the way they are combined, i.e. the\ncoefficients $c_i$. For example, the original so-called embedded\nRunge-Kutta scheme was discovered by Fehlberg and consisted of a\nfourth-order scheme and fifth-order scheme which shared the same six\nstages.\n\nIn general, a fourth-order scheme embedded in a fifth-order scheme will\nshare the stages \n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+b_{21}k_1, t_n+a_2h)\\\\\n \\vdots \\\\\n k_6 = h f(y_n+b_{51}k_1+ ...+b_{56}k_6, t_n+a_6h)\n \\end{array}\n \\end{aligned}\n$$\n\n \n\n\n\n\nThe fifth-order formula takes the step: \n\n$$\n y_{n+1}=y_n+c_1k_1+c_2k_2+c_3k_3+c_4k_4+c_5k_5+c_6k_6\n$$ \n\nwhile the\nembedded fourth-order formula takes a different step:\n\n\n\n$$\n y_{n+1}^*=y_n+c^*_1k_1+c^*_2k_2+c^*_3k_3+c^*_4k_4+c^*_5k_5+c^*_6k_6\n$$\n\nIf we now take the difference between the two numerical estimates, we\nget an estimate $\\Delta_{\\rm spec}$ of the truncation error for the\nfourth-order method, \n\n\n$$\n \\Delta_{\\rm est}(i)=y_{n+1}(i) - y_{n+1}^{*}(i) \n= \\sum^{6}_{i=1}(c_i-c_{i}^{*})k_i\n$$ \n\nThis will prove to be very useful\nin the next lab where we provide the Runge-Kutta algorithms with\nadaptive stepsize control. 
The error estimate is used as a guide to an\nappropriate choice of stepsize.\n\nAn example of an embedded Runge-Kutta scheme was found by Cash and Karp\nand has the tableau: $$\n\\begin{array}{|c|c|cccccc|c|c|} \\hline\ni & a_i & {b_{ij}} & & & & & & c_i & c^*_i \\\\ \\hline\n1 & & & & & & & & \\frac{37}{378} & \\frac{2825}{27648}\\\\\n2 & \\frac{1}{5} & \\frac{1}{5}& & & & & & 0 &0 \\\\\n3 & \\frac{3}{10} & \\frac{3}{40}&\\frac{9}{40}& & & & &\\frac{250}{621}&\\frac{18575}{48384}\\\\\n4 & \\frac{3}{5}&\\frac{3}{10}& -\\frac{9}{10}&\\frac{6}{5}& & & &\\frac{125}{594}& \\frac{13525}{55296}\\\\\n5 & 1 & -\\frac{11}{54}&\\frac{5}{2}&-\\frac{70}{27}&\\frac{35}{27}& & & 0 & \\frac{277}{14336}\\\\\n6 & \\frac{7}{8}& \\frac{1631}{55296}& \\frac{175}{512}&\\frac{575}{13824}& \\frac{44275}{110592}& \\frac{253}{4096}& & \\frac{512}{1771} & \\frac{1}{4}\\\\\\hline\n{j=} & & 1 & 2 & 3 & 4 & 5 & 6 & & \\\\ \\hline\n\\end{array}\n$$\n**Problem embedded**: Though the error estimate is for the embedded\nfourth-order Runge-Kutta method, the fifth-order method can be used in\npractice for calculating the solution, the assumption being the\nfifth-order method should be at least as accurate as the fourth-order\nmethod. In the demo below, compare solutions of the test problem\n(eq: test2]) \n\n$$\\frac{dy}{dt} = -y +t +1, \\;\\;\\;\\; y(0) =1\\ \\ \\ \\mathbf{eq: test2}$$\n\ngenerated by the fifth-order method with solutions generated by the\nstandard fourth-order Runge-Kutta method. Which method\nis more accurate? Again, determine how the error decreases as you halve\nthe stepsizes. import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom numlabs.lab4.lab4_functions import initinter41,rk4ODEinter41,rkckODEinter41\ninitialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.2,'c1':-1.,'c2':1.,'c3':1.}\ncoeff = initinter41(initialVals)\n\ntimeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt)\nnsteps=len(timeVec)\nye=[]\nym=[]\nyrk=[]\nyrkck=[]\ny1=coeff.yinitial\ny2=coeff.yinitial\nyrk.append(coeff.yinitial)\nyrkck.append(coeff.yinitial)\nfor i in np.arange(1,nsteps):\n ynew=rk4ODEinter41(coeff,y1,timeVec[i-1])\n yrk.append(ynew)\n y1=ynew \n ynew=rkckODEinter41(coeff,y2,timeVec[i-1])\n yrkck.append(ynew)\n y2=ynew \nanalytic=timeVec + np.exp(-timeVec)\ntheFig,theAx=plt.subplots(1,1)\nl1=theAx.plot(timeVec,analytic,'b-',label='analytic')\ntheAx.set_xlabel('time (seconds)')\nl2=theAx.plot(timeVec,yrkck,'g-',label='rkck')\nl3=theAx.plot(timeVec,yrk,'m-',label='rk')\ntheAx.legend(loc='best')\ntheAx.set_title('interactive 4.3')# moving from a notebook to a library\n\nIf we want our ODE routines to be more generally useful, we need to lift\ntwo restrictions from the code:\n\n1. Inital conditions are currently specified in the main script as a dictionary, e.g.:\n \n ```python\n \n initialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,\n 'dt':0.2,'c1':-1.,'c2':1.,'c3':1.} \n \n ``` \n and then converted to a named tuple in [initinter41](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/lab4_functions.py#L5-L9)\n \n We need to move this into an external configuration file that we can keep track of using\n version control, so we can keep the library code and the input\n and output files in separate folders and keep a record of our runs.\n \n2. The derivatives are hardwired into the library, for example in\n [eulerinter41](https://github.com/phaustin/numeric/blob/3bab591fb584abbc95757eb40ae5c83dce3cb94a/numlabs/lab4/lab4_functions.py#L15-L17), \n . 
We need to be able to have integrators work with any derivative function.\n \n## Writing a config file\n\nPython has a variety of configuration libraries, including [configparser](https://docs.python.org/3.4/library/configparser.html) in the standard library. This is\noverkill, however, for our simple programs. We just need a way to input and output a\ndictionary in human readable form. One example of how to do this is\n[write_init.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/write_init.py):\n\n```python\n\n \"\"\"\n write the initial condition file for the simple oscillator\n example\n \"\"\"\n\n import json\n\n initialVals={'yinitial': [0.,1.],'t_beg':0.,'t_end':40.,'dt':0.1,'c1':0.,'c2':1.}\n initialVals['comment'] = 'written Sep. 29, 2015'\n initialVals['plot_title'] = 'simple damped oscillator run 1'\n\n with open('run_1.json','w') as f:\n f.write(json.dumps(initialVals,indent=4))\n```\n\nWhen you run this from the command line or IPython with:\n\n\n In [19]: run write_init\n \nyou get a json [Javascript Object Notation](https://en.wikipedia.org/wiki/JSON)\nfile that looks like this:\n\n```javascript\n\n {\n \"t_beg\": 0.0,\n \"c1\": 0.0,\n \"c2\": 1.0,\n \"t_end\": 40.0,\n \"dt\": 0.1,\n \"plot_title\": \"simple damped oscillator run 1\",\n \"comment\": \"written Sep. 29, 2015\",\n \"yinitial\": [\n 0.0,\n 1.0\n ]\n }\n```\nThis format is simple enough to change with a text editor.\n\nTo load this into a program, do something like [read_init.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/read_init.py):\n\n```python\n\n import json\n from collections import namedtuple\n\n with open('run_1.json','r') as f:\n init_dict=json.load(f)\n\n print('as a dictionary:\\n{}\\n'.format(init_dict))\n\n #either use this as a dict or convert to a namedtuple\n initvals=namedtuple('initvals','dt c1 c2 t_beg t_end yinitial comment plot_title')\n theCoeff=initvals(**init_dict)\n\n print('as a namedtuple:\\n{}'.format(theCoeff))\n\n```\n\nwhich produces:\n\n```\nIn [21]: run read_init\nas a dictionary:\n{'yinitial': [0.0, 1.0], 'c2': 1.0, 'plot_title': 'simple damped oscillator run 1', 'comment': 'written Sep. 29, 2015', 't_end': 40.0, 'c1': 0.0, 't_beg': 0.0, 'dt': 0.1}\n\nas a namedtuple:\ninitvals(dt=0.1, c1=0.0, c2=1.0, t_beg=0.0, t_end=40.0, yinitial=[0.0, 1.0], comment='written Sep. 29, 2015', plot_title='simple damped oscillator run 1')\n```\n\n\n## Passing a derivative function to an integrator\n\nIn python, functions are first class objects, which means you can pass them around like any\nother datatype, no need to get function handles as in matlab or Fortran. The integrators\nin [test.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/test.py)\nhave been written to accept a derivative function of the form:\n\n```python\n def derivs4(coeff, y):\n```\n\ni.e. as long as the derivative can be written in terms of coefficients\nand the previous value of y, the integrator will move the ode ahead one\ntimestep. 
If we wanted coefficients that were a function of time, we would\nneed to also include those functions the coeff namedtuple, and add keep track of the\ntimestep through the integration.\n\nHere's an example using foward euler to integrate the harmonic oscillator%matplotlib inline\n\nimport json\nfrom numlabs.lab4.example.test import read_init,euler4\n#\n# specify the derivs function\n#\ndef derivs(coeff, y):\n f=np.empty_like(y) #create a 2 element vector to hold the derivitive\n f[0]=y[1]\n f[1]= -1.*coeff.c1*y[1] - coeff.c2*y[0]\n return f\n#\n# first make sure we have an input file in this directory\n#\ninitialVals={'yinitial': [0.,1.],'t_beg':0.,'t_end':40.,'dt':0.1,'c1':0.,'c2':1.}\ninitialVals['comment'] = 'written Sep. 29, 2015'\ninitialVals['plot_title'] = 'simple damped oscillator run 1'\n\ninfile='run_1.json'\nwith open(infile,'w') as f:\n f.write(json.dumps(initialVals,indent=4))\n#\n# now read the initial information into a namedtuple coeff\n#\n \ninfile='run_1.json'\ncoeff=read_init(infile)\n\n#\n# integrate and save the result in savedata\n#\ntime=np.arange(coeff.t_beg,coeff.t_end,coeff.dt)\ny=coeff.yinitial\nnsteps=len(time) \nsavedata=np.empty([nsteps],np.float64)\nfor i in range(nsteps):\n y=euler4(coeff,y,derivs)\n savedata[i]=y[0]\n\ntheFig,theAx=plt.subplots(1,1,figsize=(8,8))\ntheAx.plot(time,savedata,'o-')\ntheAx.set_title(coeff.plot_title)\ntheAx.set_xlabel('time (seconds)')\ntheAx.set_ylabel('y0') \n\n\n\n**problem coding A**: Try out [the lab4 example](https://github.com/phaustin/numeric/tree/master/numlabs/lab4/example):\n\nAs set up above, test.py\nsolved the damped, harmonic oscillator with the (unstable) forward Euler method.\n\n1. Write a new routine that solves the harmonic oscilator using [Heun’s method](#eq_heuns)\n along the lines of the routines in [lab4_functions.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/lab4_functions.py)\n\n Hand in a notebook with the code and a plot.\n\n \n**problem coding B**:\n\n1. Now solve the following test equation by both the midpoint and\n Heun’s method and compare. $$f(y,t) = t - y + 1.0$$ Choose two sets\n of initial conditions and investigate the behaviour.\n\n2. Is there any difference between the two methods when applied to\n either problem? Should there be? Explain by analyzing the steps\n that each method is taking.\n\n \n**problem coding C**:\n\n6. Solve the Newtonian cooling equation of lab 1 by any of the above\n methods. \n\n7. Hand in some sample plots along with the parameter values and\n initial conditions used.# Mathematical Notes \n\n\n\n\n\n\n## Note on the Derivation of the Second-Order Runge-Kutta Methods\n\nA general s-stage Runge-Kutta method can be written as,\n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_i = h f(y_n+ {\\displaystyle \\sum_{j=1}^{s} } b_{ij}k_j, t_n+a_ih), \n \\;\\;\\; i=1,..., s\\\\\n y_{n+1} = y_n + {\\displaystyle \\sum_{j=1}^{s}} c_jk_j \n \\end{array}\\end{aligned}$$ \n \n where\n\n${\\displaystyle \\sum_{j=1}^{s} } b_{ij} = a_i$.\n\nIn particular, an *explicit* 2-stage Runge-Kutta method can\nbe written as, \n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+ak_1, t_n+ah)\\\\\n y_{n+1} = y_n + c_1k_1 +c_2k_2\n \\end{array}\n \\end{aligned}\n $$\n where \n \n $ b_{21} = a_2 \\equiv a$. \n \n So we want\nto know what values of $a$, $c_1$ and $c_2$ leads to a second-order\nmethod, i.e. 
a method with an error proportional to $h^3$.\n\nTo find out, we compare the method against a second-order Taylor expansion,\n\n\n\n$$\n y(t_n+h) = y(t_n) + hy^\\prime(t_n) + \\frac{h^2}{2}y^{\\prime \\prime}(t_n)\n + O(h^3)\n$$\n\nSo for the $y_{n+1}$ to be second-order accurate, it must match the\nTaylor method. In other words, $c_1k_1 +c_2k_2$ must\nmatch $hy^\\prime(t_n) + \\frac{h^2}{2}y^{\\prime \\prime}$. To do this, we\nneed to express $k_1$ and $k_2$ in terms of derivatives of $y$ at time\n$t_n$.\n\nFirst note, $k_1 = hf(y_n, t_n) = hy^\\prime(t_n)$.\n\nNext, we can expand $k_2$ about $(y_n.t_n)$, \n\n\n\n$$\nk_2 = hf(y_n+ak_1, t_n+ah) = h(f + haf_t + haf_yy^\\prime + O(h^2))\n$$\n\n\n\nHowever, we can write $y^{\\prime \\prime}$ as, $$\n y^{\\prime \\prime} = \\frac{df}{dt} = f_t + f_yy^\\prime$$ This allows us\nto rewrite $k_2$ in terms of $y^{\\prime \\prime}$,\n\n$$k_2 = h(y^\\prime + hay^{\\prime \\prime}+ O(h^2))$$\n\nSubstituting these expressions for $k_i$ back into the Runge-Kutta\nformula gives us,\n$$y_{n+1} = y_n + c_1hy^\\prime +c_2h(y^\\prime + hay^{\\prime \\prime})$$\nor $$y_{n+1} = y_n + h(c_1 +c_2)y^\\prime + h^2(c_2a)y^{\\prime \\prime}$$\n\nIf we compare this against the second-order Taylor method,\nwe see that we need, \n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n c_1 + c_2 = 1\\\\\n a c_2 = \\frac{1}{2}\n \\end{array}\n \\end{aligned}\n $$ \n \nfor the Runge-Kutta method to be\nsecond-order.\n\n\nIf we choose $a = 1/2$, this implies $c_2 = 1$ and $c_1=0$. This gives\nus the midpoint method.\n\nHowever, note that other choices are possible. In fact, we have a\n*one-parameter family* of second-order methods. For example\nif we choose, $a=1$ and $c_1=c_2=\\frac{1}{2}$, we get the\n*modified Euler method*,\n\n\n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+k_1, t_n+h)\\\\\n y_{n+1} = y_n + \\frac{1}{2}(k_1 +k_2)\n \\end{array}\n \\end{aligned}$$ \n \n while the choice\n$a=\\frac{2}{3}$, $c_1=\\frac{1}{4}$ and $c_2=\\frac{3}{4}$, gives us\n*Heun’s method*,\n\n\n\n\n$$\n\\begin{aligned}\n \\begin{array}{l}\n k_1 = h f(y_n,t_n)\\\\\n k_2 = h f(y_n+\\frac{2}{3}k_1, t_n+\\frac{2}{3}h)\\\\\n y_{n+1} = y_n + \\frac{1}{4}k_1 + \\frac{3}{4}k_2\n \\end{array}\n \\end{aligned}\n$$# Glossary \n\n\n- **driver** A routine that calls the other routines to solve the\n problem.\n\n- **embedded Runge-Kutta methods**: Two Runge-Kutta\n methods that share the same stages. The difference between the solutions\n give an estimate of the local truncation error.\n\n- **explicit** In an explicit numerical scheme, the calculation of the solution at a given\n step or stage does not depend on the value of the solution at that step\n or on a later step or stage.\n \n- **fourth-order Runge-Kutta method** A popular fourth-order, four-stage, explicit Runge-Kutta\n method.\n\n- **implicit**: In an implicit numerical scheme, the\n calculation of the solution at a given step or stage does depend on the\n value of the solution at that step or on a later step or stage. 
Such\n methods are usually more expensive than implicit schemes but are better\n for handling stiff ODEs.\n\n- **midpoint method** : A two-stage,\n second-order Runge-Kutta method.\n\n- **stages**: The approximations\n to the derivative made in a Runge-Kutta method between the start and end\n of a step.\n\n- **tableau** The tableau for a Runge-Kutta method\n organizes the coefficients for the method in tabular form.\n\n\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/lab4/lab4.ipynb"},"repo_name":{"kind":"string","value":"tjarnikova/numeric"},"chain_length":{"kind":"number","value":7,"string":"7"}}},{"rowIdx":4876,"cells":{"content":{"kind":"string","value":"## Superposition of energy eigenstates\n\nQuestions? kasper.peeters@durham.ac.uk\n\nThis notebook shows the time-evolution of the sum of two energy eigenstates\nfor a particle in a box (in 1 dimension). This corresponds to problem 3 of\nchapter 12 of the Durham University Mathematical Physics II lecture notes.import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation, rc\nfrom IPython.display import HTMLThe normalised energy eigenfunctions for a particle in a box of unit size are\n$$\\phi_n(x) = \\sqrt{2}\\sin(n\\pi x)$$\nand (setting $\\hbar=1$) the energy eigenvalues are\n$$ E_n = \\frac{1}{2} n^2 \\pi^2\\,.$$def phi(n, x):\n return np.sqrt(2)*np.sin(n*np.pi*x)\n\ndef E(n):\n return n**2*np.pi**2/2Let's plot the first three for reference:x = np.linspace(0,1,100)\nplt.plot(x, phi(1,x), x,phi(2,x), x, phi(3,x),'-')We will look at a wave function which is a superposition of the 1st and 2nd \nenergy eigenstates,\n$$ \\psi(x) = \\frac{1}{\\sqrt{2}} \\Big(\n\\phi_1(x) + \\phi_2(x) \\Big)\\,.$$\nThe time-evolution can be found by multiplying each term with its own\n$\\exp(-iE t)$ factor, for the appropriate $E$. The probability density is also\ndefined below, by taking the complex norm squared.def psi(x,t):\n return 1/np.sqrt(2)*(np.exp(-1j*E(1)*t)*phi(1,x) + np.exp(-1j*E(2)*t)*phi(2,x))\n\ndef P(x,t):\n return np.real_if_close(psi(x,t)*np.conj(psi(x,t)))For $t=0$ and $t=1$ the probability density looks as follows:plt.plot(x, P(x,0), x, P(x,1),'-')Let's make a movie. The block below sets up a figure environment with\nappropriate axes ranges $0\\leq x\\leq 1$ and $0\\leq y \\leq 3$.%%capture\nfig, ax = plt.subplots();\nax.set_xlim(( 0, 1))\nax.set_ylim(( 0, 3))\nourplot, = ax.plot([], [])To animate, we need to define a function which returns the `ourplot` figure\nfor a given time $t$. It actually receives the frame number $i$, and we will\nset $t=i/50.0/f$ where $f$ is the frequency. 
So that at $i=50$, we have\n$t=T$ with $T$ the period of oscillation.fpp = 50 # number of animation frames per period\nw = E(2)-E(1) # angular frequency, see the problem in the notes\nfreq = w/2/np.pi # frequency\nT = 1/freq # period\n\ndef animate(i):\n xv = np.linspace(0, 3, 200)\n yv = P(xv, i*T/fpp)\n ourplot.set_data(xv, yv)\n return (ourplot,)\nTThe plot is then made by constructing a `FuncAnimation` object, giving it \nthe canvas object `fig` in which to draw, the function `animate` which returns\nthe figure for a given frame, the number of frames, and the delay/interval (in ms) between\neach frame.anim = animation.FuncAnimation(fig, animate, frames=fpp, interval=50)\nHTML(anim.to_jshtml())Note how you can easily see that the expectation value of the position oscillates\nwith frequency $\\omega/(2\\pi)$ (the duration of one run of the animation),\nwhere $\\omega = E_2 - E_1$, as computed in the problem.## More complicated example\n\nA more complicated example consists of a superposition of 4 energy eigenstates,\nsuch as to approximate the evolution of \n$$\\psi(x, t=0) = \\sqrt{858} x (x-1)^5\\, .$$\nThe expansion coefficients are stated below but can be obtained by projecting\nthe above wave function on each eigenstate.c = [-0.622, -0.655, -0.370, -0.177]\n\ndef psi2(x,t):\n tot = 0\n for i in range(1,5):\n tot += c[i-1] * np.exp(-1j*E(i)*t)*phi(i,x)\n return tot\n\ndef P2(x,t):\n return np.real_if_close(psi2(x,t)*np.conj(psi2(x,t)))\nplt.plot(x, np.real(psi2(x, 0)), 'r--', x, np.sqrt(858)*x*(x-1)**5, 'b-')\nfpp = 100 # number of animation frames per period\nT = 1\n\ndef animate2(i):\n xv = np.linspace(0, 3, 200)\n yv = P2(xv, i*T/fpp)\n ourplot.set_data(xv, yv)\n return (ourplot,)\nanim2 = animation.FuncAnimation(fig, animate2, frames=200, interval=50)\nHTML(anim2.to_jshtml())"},"license":{"kind":"string","value":"non_permissive"},"path":{"kind":"string","value":"/sum_of_eigenstates.ipynb"},"repo_name":{"kind":"string","value":"kpeeters/quantum_notebooks"},"chain_length":{"kind":"number","value":9,"string":"9"}}},{"rowIdx":4877,"cells":{"content":{"kind":"string","value":"# TRANSFORMASI DATA import pandas as pd\nimport numpy as np\npcademo = pd.read_csv('G:\\Kuliah\\Data Mining\\Transformasi Data\\Praktek\\pcademo.csv')\nx = pcademo.iloc[0:51,1:20]\nx.tall()"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Transformasi Data/Latihan/Untitled.ipynb"},"repo_name":{"kind":"string","value":"rifqirabbanie/Data-Mining"},"chain_length":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":4878,"cells":{"content":{"kind":"string","value":"### Solutions of PS2\n\nThis notebook is in html. 
To be able to run it, please click: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/gunerilhan/economicgrowth/blob/master/ps2/ps2.ipynb)# First import the packages we'll need\n# Numpy is for numerical analysis\nimport numpy as np\n# Pandas is for data storage and manipulation\nimport pandas as pd\n# Matplotlib and seaborn are for plotting\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nfrom matplotlib import style\nstyle.use('https://gunerilhan.github.io/img/fivethirtyeight-modified.mplstyle')\n# Different color palettes that can be used in plots\ncolors = {0:['#264653','#219D8F','#E9C46A','#F4A261','#E76F51'],\n 1:['#003049','#D62828','#F77F00','#FCBF49','#EAE2B7'],\n 2:['#F72585','#7209B7','#3A0CA3','#4361EE','#4CC9F0'],\n 3: ['#165aa7','#cb495c','#bb60d5','#f47915','#06ab54','#002070','#b27d12','#007030']}\n\n\n1. Go to the following website: https://www.rug.nl/ggdc/productivity/pwt/\n\nDownload Penn World Table version 10.0 database in your preferred format. Consider the following countries: the UK, Argentina, Denmark, Central African Republic, Nigeria and Italy.\n\na. Using data in 2019, calculate real output per worker and human capital index for each of the countries listed above. Use \n - 'cgdpe' (Expenditure-side real GDP at current PPPs (in mil. 2017US$)) as a measure of real GDP,\n - 'emp' (Number of persons engaged (in millions)) as a measure of employment, \n - and 'hc' (Human capital index, based on years of schooling and returns to education) as a measure of human capital.\n\nb. Assume a Cobb-Douglas production function with $\\alpha=1/3$. Suppose countries differ only with respect to their investment rates. For each country listed above, calculate Solow model's predicted output per worker relative to the UK output per worker. For investment rates of countries, use average of variable `csh\\_i' (Share of gross capital formation at current PPPs) over the years 1970-2019. Generate a table comparing Solow model's predicted output per worker relative to the UK with actual output per worker relative to the UK (in 2019, or in the latest available year). Hint, I ask you to do an exercise similar to what we have done in lecture 3. Briefly comment on the table. \n\nc. Now, assume that countries differ with respect to their investment rates and employment growth rates. Assume that depreciation rates in all countries are equal to 5%, $\\delta=0.05$. Calculate average annual employment growth rates of the above listed countries from 1970 to 2019. Repeat the exercise in part b. Create a table that compares Solow model's predicted output per worker differences with actual output per worker differences relative to the UK. Briefly comment on the table.\n\nd. Now, assume that countries differ not only in their investment and employment growth rates but also in their human capital. For this exercise use variable hc in 2019 (Human capital index, based on years of schooling and returns to education) as $h$ in the Solow model. Assume that depreciation rates in all countries are equal to 5%, $\\delta=0.05$. Repeat part c while considering human capital differences across countries. Create a table and briefly comment on it.\n\ne.Do your predictions approach to actual income differences as you take into account more variables? 
\n\n# Here, I download the Penn World Tables legend\ndf_legend = pd.read_excel('https://www.rug.nl/ggdc/docs/pwt100.xlsx',sheet_name='Legend')\ndf_legend.dropna(subset=['Variable name','Variable definition'],inplace=True)\n# I create a dictionary of variables names and definitions\ndf_legend = dict(zip(df_legend['Variable name'],df_legend['Variable definition']))\n# Here are the variables, we will need in this analysis\nprint('cgdpe = ', df_legend['cgdpe'])\nprint('rnna = ', df_legend['rnna'])\nprint('emp = ', df_legend['emp'])\nprint('hc =', df_legend['hc'])\nprint('csh_i =', df_legend['csh_i'])\n# now download the data\ndf = pd.read_excel('https://www.rug.nl/ggdc/docs/pwt100.xlsx',sheet_name='Data')\n# create GDP per worker \ndf['gdpPerworker'] = df['cgdpe']/df.emp\n# Penn World Tables contain many variables, but these are the variables ...\n# I need to create the table asked in part a\nvar_list = ['countrycode','country','cgdpe','emp','hc','year']\n# List of countries\ncountry_list = ['GBR','ARG','DNK','CAF','NGA','ITA']\n## Here, we create the table asked in part a)\n\n# This cell does five things:\n# 1) It selects the countries in my list: df.countrycode.isin(country_list)\n# 2) It selects year 2017: (df.year==201-)\n# 3) It selects the variables that I want to display :\n# [['country','year','gdpPerworker','hc','emp','pop']]\n# 4) It rounds the variables to 2 decimal points: .round(decimals=2)\n# 5) Sets country as index: .set_index('country') \ndf[df.countrycode.isin(country_list) & (df.year==2019)][\n ['country','year','gdpPerworker','hc','emp','pop']].round(decimals=2)\n## To solve part b, I first need to calculate average investment rate ...\n# for each country from 1970 to 2009\n\n# This cell does the following things:\n# 1) It selects data from years 1970 to 2019: df[df.year.isin(np.arange(1970,2020))]\n# np.arange(1970,2020) creates a list from 1970 to 2019.\n# intervals on python is right-open. Hence np.arange(1970,2020) creates \n# an array that does not inclue 2020\n# 2) it takes averages of the investment rates for each country:\n #.groupby(['countrycode','country'])['csh_i'].mean()\n# 3) it resets data index\n# 4) it renames 'csh_i' variable to \"Investment rate\"\n \nbegin,end=1970,2019\ndf_fundamentals = (df[df.year.isin(np.arange(begin,end+1))]\n .groupby(['countrycode','country'])['csh_i'].mean().reset_index().rename(\n columns={'csh_i':'Investment rate'}))\n# Now look at he investment rate of the countries we calculated above\n# In fact we calculated invesment rate for each country in our data\n# we display investment rate only for the countries we are interested in\n\ndf_fundamentals.set_index('countrycode',inplace=True)\ndf_fundamentals.loc[country_list].round(decimals=2)\n# In part c, we need to calculate the average employment growth\n# first sort our data by country and by year\n# we need by year sorting to ensure that 1970 data comes earlier than 2017 data ...\n# in our dataset. 
We need this in the next cell\ndf.sort_values(['countrycode','year'],inplace=True)\n# This cell calculates the average annual employment growth rate from 1970 to 2017\n# It first selects the years 1970 and 2019\n# For each country it calculate average annual employment growth rate ...\n# using this formula = (emp_2019/emp_1970)^(1/49)-1\n# there are multiple ways of calculating average employment growth rate,...\n# the above is one of them\n\n# Here is how code works:\n# 1) it selects data from years 1970 and 2019: df[df.year.isin([1970,2019])]\n# 2) It groups the data based on countrycodes: .groupby(['countrycode']\n# 3) for each country code, we have 2 observations, from 1970 and from 2019\n# x['emp'].values[0] is the first observation from 1970 \n# x['emp'].values[1] is the second observation from 2019\n# the average annual employment growth is equal to \n# x['emp'].values[1]/x['emp'].values[0])**(1/49)-1, ...\n# where x represent a country\n# 4) Lastly it renames what we calculated as \n# 'Employment growth': .rename(columns={0:'Employment growth'})\n\ndf_emp_growth = (df[df.year.isin([begin,end])]\n .groupby(['countrycode']).apply(lambda x: \n (x['emp'].values[1]/x['emp'].values[0])**(1/(end-begin))-1)\n .reset_index().rename(columns={0:'Employment growth'}))\n# show the employment growth rate for the countries we selected\ndf_emp_growth.set_index('countrycode',inplace=True)\ndf_emp_growth.loc[country_list].round(decimals=3)Above, since data for CAF is missing in 1970, our method did not give a result for CAF. \nAlternatively, we could calculate employment growth for each available year, and take average of the \nemployment growth rates.# merge df_fundamentals data (contains invesment rates) \n# with df_emp_growth (contains employment growth rate)\ndf_fundamentals= df_fundamentals.join(df_emp_growth,how='left')\n# in part d, we will need human capital values\n# merge our df_fundamentals data with the Penn World Tables\n# but, we don't need all of PWT, select only year 2019, and other required variables\ndf_fundamentals = df_fundamentals.join(df[df.year == end].set_index('countrycode')[\n ['emp','gdpPerworker','hc']],how='left')\n# we will calculate Solow's predicted output per worker relative to the UK\n# hence, create a different data just for the UK values\ngbr = df_fundamentals.loc['GBR']\n# now add columns to df_fundamentals data, consisting of corresponding values from the UK\nfor var in ['Investment rate','Employment growth','hc','gdpPerworker']:\n df_fundamentals[f'{var}, GBR'] = gbr[var]\n# here is our data\ndf_fundamentals.loc[country_list].round(decimals=3)\n# We have prepared our data to conduct the required analysis\n# First set our parameter values\nalpha=1/3\ndelta = 0.05\n# In many parts of the analysis, we will write alpha/(1-alpha)...\n# create a new variable to redue typing\nalpham = alpha/(1-alpha)Notice that at the steady state of the Solow model income per worker is equal to\n$$y^\\ast = A^{1/(1-\\alpha)}\\left(\\frac{\\gamma}{\\delta+n} \\right)^{\\alpha/(1-\\alpha)}h.$$\n\nTherefore income per worker ratios are (assuming countries have the same technology, $A$):\n$$\\frac{y_i}{y_{UK}} = \\left(\\frac{\\frac{\\gamma_i}{\\delta+n_i}}{\\frac{\\gamma_{UK}}{\\delta+n_{UK}}} \\right)^{\\alpha/(1-\\alpha)}\\frac{h_i}{h_{UK}}.$$\n\nRewrite the above formula:\n$$\\frac{y_i}{y_{UK}} = \\left(\\frac{\\gamma_i}{\\gamma_{UK}}\\right)^{\\alpha/(1-\\alpha)}\\left(\\frac{\\delta+n_{UK}}{\\delta+n_i}\\right)^{\\alpha/(1-\\alpha)}\\frac{h_i}{h_{UK}}.$$\n\n\nIn part b, we assume 
countries differ only with respect to their investment rates. Therefore:\n$$\\frac{y_i}{y_{UK}} = \\left(\\frac{\\gamma_i}{\\gamma_{UK}}\\right)^{\\alpha/(1-\\alpha)}.$$\n\n \n# calculate Solow's predicted income per worker ratios as in the above formula\ndf_fundamentals['rel_GDP_pred_inv']=(df_fundamentals['Investment rate']\n    /df_fundamentals['Investment rate, GBR'])**alpham\n# calculate the actual output per worker ratios from the data\ndf_fundamentals['rel_GDP'] = (df_fundamentals['gdpPerworker']/\n    df_fundamentals['gdpPerworker, GBR'])In part c, countries differ with respect to their employment growth rates as well as their investment rates:\n\n$$\\frac{y_i}{y_{UK}} = \\left(\\frac{\\frac{\\gamma_i}{\\delta+n_i}}{\\frac{\\gamma_{UK}}{\\delta+n_{UK}}} \\right)^{\\alpha/(1-\\alpha)}.$$\n\n\n## calculate Solow's predicted income per worker ratios as in the above formula\ndf_fundamentals['rel_GDP_pred_inv_emp']=((df_fundamentals['Investment rate']/\n    (delta+df_fundamentals['Employment growth']))\n    /(df_fundamentals['Investment rate, GBR']/\n    (delta+df_fundamentals['Employment growth, GBR'])))**alphamIn part d, countries differ with respect to their human capital, employment growth rates and investment rates:\n\n\n\n$$\\frac{y_i}{y_{UK}} = \\left(\\frac{\\frac{\\gamma_i}{\\delta+n_i}}{\\frac{\\gamma_{UK}}{\\delta+n_{UK}}} \\right)^{\\alpha/(1-\\alpha)}\\frac{h_i}{h_{UK}}.$$\n\n## calculate Solow's predicted income per worker ratios as in the above formula\ndf_fundamentals['rel_GDP_pred_inv_emp_hc'] = (df_fundamentals['rel_GDP_pred_inv_emp']*\n    df_fundamentals['hc']/df_fundamentals['hc, GBR'])\n# I need this cell to rename the table columns\ncolumn_names = dict(zip(['rel_GDP_pred_inv','rel_GDP_pred_inv_emp','rel_GDP_pred_inv_emp_hc','rel_GDP'],\n    ['Prediction, part b','Prediction, part c','Prediction, part d','Actual']))\ndf_fundamentals.loc[country_list]\n# here is Solow's predicted output per worker ratios under different assumptions\nresults_1=(df_fundamentals[\n    ['rel_GDP_pred_inv','rel_GDP_pred_inv_emp','rel_GDP_pred_inv_emp_hc','rel_GDP']].rename(\n    columns=column_names).round(decimals=2))\nresults_1.loc[country_list]In the above table, as we add more variables to our equation, the Solow model's predicted income per worker ratios for Argentina, the Central African Republic, and Nigeria get closer to their actual levels, but not for the other countries. fig,ax = plt.subplots(figsize=(6,6))\nax.scatter(df_fundamentals['rel_GDP_pred_inv'],df_fundamentals['rel_GDP'],\n    sizes=df_fundamentals['emp'].values,color=colors[1][0],alpha=.7,label='$\\gamma$')\nax.scatter(df_fundamentals['rel_GDP_pred_inv_emp'],df_fundamentals['rel_GDP'],\n    sizes=df_fundamentals['emp'].values,color=colors[1][1],alpha=.7,label='$\\gamma,n$')\nax.scatter(df_fundamentals['rel_GDP_pred_inv_emp_hc'],df_fundamentals['rel_GDP'],\n    sizes=df_fundamentals['emp'].values,color=colors[1][2],alpha=.7,label='$\\gamma,n,h$')\nax.set_xlabel('Predicted')\nax.set_ylabel('Actual')\nticks = [0,.4,.8,1.2,1.6]\nax.set_xlim(-0.2,1.7)\nax.set_ylim(-0.2,1.7)\nax.set_xticks(ticks)\nax.set_yticks(ticks)\n\n\nax.legend(loc='upper left',frameon=False)\nplt.savefig('./rel_GDP_pred_inv_emp_hc.svg',bbox_inches='tight')When we look at all the countries, the model does better as we control for more and more variables. However, as shown in our table, the model does not do as well for the four advanced economies in our list. We could conclude that the Solow model does well in accounting for the income per worker differences between developed and developing countries. 
But, it does not do as well in accounting for the income per worker differences between developed countries, like the UK, Italy and Denmark listed in our question. Productivity differences are the main factor accounting for the income per worker differences among the developed countries.2. Consider the extended Solow model. Suppose the production function is $Y = K^\\alpha (ehL)^{1-\\alpha}.$\n\na. Derive the change in capital per effective worker, $\\dot{\\tilde{k}}$, as a function of capital per effective worker, $\\tilde{k}$, and other exogenous variables in the model.\n\nb. Suppose that the investment rate is 20%, $\\gamma =.2$, the depreciation rate is 5\\%, $\\delta=.05$, the population growth rate is 1\\%, $n=.01$, the labor-augmenting technological progress rate is 2\\%, $g = .02$, human capital is equal to 1, $h=1$, and the capital income share is .33, $\\alpha=.33$. Find steady state capital per effective worker, income per effective worker, and consumption per effective worker.\n\nc. Suppose at time $t=0$, the economy is at the steady state, and the level of labor-augmenting technology is equal to 1, $e(0)=1$. What's the income per worker level at time $t=20$? Remember that if a variable (say M) grows at a constant rate, say m, then the value of M at time $t$ is equal to $\\exp(mt)$ times the value of M at time $0$, i.e. $M(t)=M(0)\\exp(mt).$\n\nd. Suppose again that at time $t=0$, the economy is at the steady state and the level of labor-augmenting technology is equal to 1, $e(0)=1$. Now, suddenly (and unexpectedly) human capital increases by 10\\%, i.e. $h^{new} = 1.1$. Starting from the steady state you found in part b, simulate the model for 100 periods, and calculate capital per effective worker, capital per worker, income per effective worker, and income per worker at each time period.\n# First, set our parameter values\nalpha = .33\ndelta = .05\nh = 1\nn = 0.01\ng = 0.02\ngamma = .2\n# this is one way of defining a function in python\n# f is our production function, output per effective worker\nf = lambda k,h: k**alpha*h**(1-alpha)\n# this is our kdot function\nkdot = lambda k,h: gamma*k**alpha*h**(1-alpha)-(delta+n+g)*kAt the steady state:\n $$\\tilde{k}^\\ast = \\left(\\frac{\\gamma}{n+g+\\delta}\\right)^{1/(1-\\alpha)}h $$\n\nAt all times:\n $$ \\tilde{y} = \\tilde{k}^\\alpha h^{1-\\alpha} $$\n $$ \\tilde{c} = (1-\\gamma)\\tilde{y}$$# calculate the steady state variables\nk_tilde_ss = (gamma/(n+delta+g))**(1/(1-alpha))*h\ny_tilde_ss = k_tilde_ss**alpha*h**(1-alpha)\n# consumption is the fraction (1-gamma) of output that is not invested, as in the formula above\nc_tilde_ss = y_tilde_ss*(1-gamma)\nprint('Steady state capital per effective worker = ', np.round(k_tilde_ss,decimals=2))\nprint('Steady state output per effective worker = ', np.round(y_tilde_ss,decimals=2))\nprint('Steady state consumption per effective worker = ', np.round(c_tilde_ss,decimals=2))Steady state capital per effective worker =  3.93\nSteady state output per effective worker =  1.57\nSteady state consumption per effective worker =  1.26\nRecall the definitions $\\tilde{y} \\equiv \\frac{Y}{eL}$ and $y\\equiv \\frac{Y}{L}$. Therefore, $y=e\\tilde{y}$. In part c, the economy is at the steady state, $\\tilde{y}^\\ast$, and $e$ is growing at a constant rate. We know the initial value of $e$, $e(0)=1$, and its growth rate, $g=0.02$. Therefore, $e(20)=e(0)\\exp(g*20).$ Hence, $y(20)=e(20)*\\tilde{y}^\\ast$. print('Income per worker at t=20 is equal to', np.round(np.exp(g*20)*y_tilde_ss,decimals=2))Income per worker at t=20 is equal to 2.34\nTo solve part d, we first need to simulate $\\tilde{k}$ and $\\tilde{y}$ over time. 
We can quite easily calculate $e$ over time using the initial value of $e$, $e(0)$, and the growth rate of $e$, $g=0.02$. Then using $y(t)=\\tilde{y(t)}e(t)$ equality, we can generete $y$ sequence over time.# k_tilde_seq will be sequence of k tilde over time\n# since the economy was at the steady state, I initiate k_tilde sequence with 10 values...\n# all equal to the steady state value\n# you assume this is the value of k_tilde before time t=0 and at time t=0,...\n# as there is no change in k_tilde at time t=0. k_tilde will begin increasing at t=1\nk_tilde_seq = [k_tilde_ss,]*10\n# I also create a sequence of human capital\n# h is equal to 1 initially, then it becomes 1.1\nh_seq = np.ones(111)\n# assume 9th element of the sequence corresponds to time t=0\nh_seq[9:] = 1.1\n# starting from the 9th element, or time t=0, simulate the model to get k_tilde over time\nfor t in range(9,110):\n # k_prime is the next periods capital\n # k_prime is equal to current capital plus the change in capital\n k_prime = k_tilde_seq[t]+kdot(k_tilde_seq[t],h_seq[t])\n k_tilde_seq.append(k_prime)\n# generate e sequence as given in the formula: e(t) = e(0)*exp(g*t)\ne_seq = [np.exp(t*g) for t in range(-9,102)]\n# k = k_tilde*e\nk_seq = np.array(k_tilde_seq)*np.array(e_seq)\n# y_tilde = k_tilde^alpha*h^(1-alpha)\ny_tilde_seq = [f(k_tilde_seq[t],h_seq[t]) for t in range(111)]\n# y = y_tilde*e\ny_seq = np.array(y_tilde_seq)*np.array(e_seq)\n# now put all these variables into a table\ndf2 = pd.DataFrame({'Time':np.arange(-9,102),\n 'h':h_seq,\n 'k tilde':k_tilde_seq,\n 'y tilde':y_tilde_seq,\n 'e':e_seq,\n 'k':k_seq,\n 'y':y_seq})\n# here is how our data looks like\ndf2.head(15)\ndf2[df2.Time==60].round(decimals=2)\n# plot k_tilde over time\nfig,ax = plt.subplots()\nplt.plot(df2.Time,df2['k tilde'],'k',linewidth=2)\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.set_xlabel('Time')\nax.set_title(r'$\\tilde{k}$')\n# plot y_tilde over time\nfig,ax = plt.subplots()\nplt.plot(df2.Time,df2['y tilde'],'k.',linewidth=2)\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.set_xlabel('Time')\nax.set_title(r'$\\tilde{y}$')\n# plot k over time\nyticks = [5,10,20,40]\nfig,ax = plt.subplots()\nplt.plot(df2.Time,df2['k'],'k',linewidth=2)\nplt.plot(df2.Time,k_tilde_ss*np.array(e_seq),'k--',linewidth=2)\nax.set_yscale('log')\nax.set_ylim(ymax=50)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks)\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.set_xlabel('Time')\nax.set_title(r'$k$')\n# plot y over time\nyticks = [2,4,8,16]\nfig,ax = plt.subplots()\nplt.plot(df2.Time,df2['y'],'k.',linewidth=2)\nplt.plot(df2.Time,y_tilde_ss*np.array(e_seq),'k--',linewidth=2)\nax.set_yscale('log')\nax.set_ylim(ymax=20)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks)\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.set_xlabel('Time')\nax.set_title(r'$y$')\n# plot growth rate of y over time\nfig,ax = 
plt.subplots()\nplt.plot(df2.Time,np.log(df2['y']).diff(),'k.',linewidth=2)\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.set_ylim(ymax= .023)\nax.set_xlabel('Time')\nax.set_title(r'$\\dot{y}/y$')"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/ps2/.ipynb_checkpoints/ps2-checkpoint.ipynb"},"repo_name":{"kind":"string","value":"anhnguyendepocen/economicgrowth"},"chain_length":{"kind":"number","value":11,"string":"11"}}},{"rowIdx":4879,"cells":{"content":{"kind":"string","value":"# Exercise 12\n\n## Analyze how travelers expressed their feelings on Twitter\n\nA sentiment analysis job about the problems of each major U.S. airline. \nTwitter data was scraped from February of 2015 and contributors were \nasked to first classify positive, negative, and neutral tweets, followed\nby categorizing negative reasons (such as \"late flight\" or \"rude service\").### **Juan Camilo Florez 201620135**\n### **Fernando Perez 200222809**\n### **Jhon Florez 201920529**\n### **Angie Paola Chacón 201012536**import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# read the data and set the datetime as the index\ntweets = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/Tweets.zip', index_col=0)\n\ntweets.head()\ntweets.shape### Proportion of tweets with each sentimenttweets['airline_sentiment'].value_counts()### Proportion of tweets per airline\ntweets['airline'].value_counts()\npd.Series(tweets[\"airline\"]).value_counts().plot(kind = \"bar\",figsize=(8,6),rot = 0)\npd.crosstab(index = tweets[\"airline\"],columns = tweets[\"airline_sentiment\"]).plot(kind='bar',figsize=(10, 6),alpha=0.5,rot=0,stacked=True,title=\"Sentiment by airline\")# Exercise 12.1 \n\nPredict the sentiment using CountVectorizer\n\nuse Random Forest classifierfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.naive_bayes import MultinomialNB\nX = tweets['text']\ny = tweets['airline_sentiment'].map({'negative':-1,'neutral':0,'positive':1})\n**crear matrices sparse con los tokens con la función countvectorizer**vect = CountVectorizer(lowercase=True)\nX_dtm = vect.fit_transform(X)\ndf=X_dtm.todense()\ndf\nprint(df.shape)(14640, 15051)\nCon la función de vectorizer se crea una matriz sparse con 15051 columnas (palabras). 
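As a small illustrative aside (added here, not part of the original solution), the document-term matrix that `CountVectorizer` builds is easiest to see on a toy corpus: each column is one vocabulary token and each entry counts how often that token appears in a document. The three example strings below are made up for illustration only.

from sklearn.feature_extraction.text import CountVectorizer

toy_corpus = ['the flight was late', 'great service', 'late flight and rude service']  # hypothetical examples
toy_vect = CountVectorizer(lowercase=True)
toy_dtm = toy_vect.fit_transform(toy_corpus)  # sparse matrix of shape (3, vocabulary size)
print(toy_vect.get_feature_names())           # the vocabulary, i.e. the column labels (get_feature_names_out() on newer scikit-learn)
print(toy_dtm.todense())                      # token counts per document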
En este caso no se ha hecho ningún tipo de limpieza al texto\n\n**lista de tokens**print(vect.get_feature_names())['00', '000', '000114', '000419', '000ft', '000lbs', '0011', '0016', '00a', '00am', '00p', '00pm', '01', '0162389030167', '0162424965446', '0162431184663', '0167560070877', '0185', '01ldxn3qqq', '01pm', '02', '0200', '03', '0316', '0372389047497', '04', '0400', '04sdytt7zd', '05', '0510', '0530', '05am', '05pm', '06', '0600', '0638', '0671', '07', '0736', '0769', '07p', '07xhcacjax', '08', '0985', '0_0', '0bjnz4eix5', '0cevy3p42b', '0ewj7oklji', '0hmmqczkcf', '0hxlnvzknp', '0jjt4x3yxg', '0jutcdrljl', '0kn7pjelzl', '0liwecasoe', '0pdntgbxc6', '0prgysvurm', '0wbjawx7xd', '0xjared', '10', '100', '1000', '1000cost', '1001', '1002', '1007', '1008', '101', '1016', '1019', '1020', '1024', '1025', '1027', '1028', '103', '1030pm', '1032', '1038', '104', '1041', '1046', '105', '1050', '1051', '1058', '106', '1065', '1071', '1074', '1079871763', '108', '1080', '1081', '1086', '108639', '1089', '1098', '1099', '10a', '10am', '10d', '10f', '10hrs', '10m', '10min', '10mins', '10p', '10pm', '10th', '[...]**Modelo Random Forest**#CRear base de train y test\nX_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=42)\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import metrics\n\nclf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\nclf\nclf.fit(X_train, y_train)\ny_pred=clf.predict(X_test)\nacc_vect_constpw =metrics.accuracy_score(y_test, y_pred)\nprint(\"Accuracy:\", acc_vect_constpw)Accuracy: 0.7666211293260473\n**matriz de confusión para las predicciones del sentimiento del tweet: negative':-1,'neutral':0,'positive':1**print(pd.crosstab(y_test, y_pred, rownames=['Actual sentiment'], colnames=['Predicted sentiment']))Predicted sentiment -1 0 1\nActual sentiment \n-1 2736 62 16\n 0 533 316 35\n 1 308 71 315\nCon el modelo incluyendo los stopwords se obtiene un accuracy de 0,76.# Exercise 12.2 \n\nRemove stopwords, then predict the sentiment using CountVectorizer.\n\nuse Random Forest classifiervect2 = CountVectorizer(lowercase= True, stop_words='english') #calcula los token y elimina stopwords\nX_dtm2 = vect2.fit_transform(X) #entrenar el modelo\ndf2=X_dtm2.todense()\ndf2\nprint(df2.shape)(14640, 14770)\nAl eliminar los stopword en la función de vectorizer, se crea una matriz sparse con 14770 columnas (palabras)\n\n**Modelo Random Forest para countvectorizer sin stopwords**#Crear base de train y test\nX_train2, X_test2, y_train2, y_test2 = train_test_split(df2, y, test_size=0.3, random_state=42)\nX_train2.shape\nclf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\nclf\nclf.fit(X_train2, y_train2)\ny_pred2=clf.predict(X_test2)\nacc_vect_sinstpw = metrics.accuracy_score(y_test2, y_pred2)\nprint(\"Accuracy:\",acc_vect_sinstpw)Accuracy: 0.7497723132969034\n**matriz de confusión para las predicciones del sentimiento del tweet: negative':-1,'neutral':0,'positive':1**print(pd.crosstab(y_test2, y_pred2, rownames=['Actual sentiment'], colnames=['Predicted sentiment']))Predicted sentiment -1 0 1\nActual sentiment \n-1 2720 45 49\n 0 614 209 61\n 1 285 45 364\nCon el modelo sin stopwords se obtiene un accuracy de 0,74, el cual es menor al modelo sin stopwords (0,76). 
Esto se puede deber a que alguna de las palabras que se están eliminando aportan información relevante sobre la predicción del sentimiento del tweet.# Exercise 12.3\n\nIncrease n_grams size (with and without stopwords), then predict the sentiment using CountVectorizer\n\nuse Random Forest classifier**Modelo random forest, iterando sobre enegramas eliminando stopwords** #Sin la función stopwords\ngram = range(1,6)\nacc1=[]\n\nclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nfor i in gram:\n vect3 = CountVectorizer(ngram_range=(1, i), lowercase= True, stop_words='english') \n X_dtm3 = vect3.fit_transform(X) \n X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42)\n clf3.fit(X_train3, y_train3)\n y_pred3 = clf3.predict(X_test3)\n acc = metrics.accuracy_score(y_test3, y_pred3)\n acc1.append([i, acc]) \n\nacc1\n**Modelo random forest, iterando sobre enegramas sin eliminar stopwords** #Sin la función stopwords\ngram = range(1,6)\nacc2=[]\n\nclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nfor i in gram:\n vect3 = CountVectorizer(ngram_range=(1, i), lowercase= True) \n X_dtm3 = vect3.fit_transform(X) \n X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42)\n clf3.fit(X_train3, y_train3)\n y_pred3 = clf3.predict(X_test3)\n acc = metrics.accuracy_score(y_test3, y_pred3)\n acc2.append([i, acc]) \n\nacc2\nAl iterar el número enegramas en los modelos randomforest con y sin stopwords, utilizando la función countvectorizer, se puede observar que el desempeño del modelo tiende a disminuir, a medida que aumenta el tamaño del enegrama. Por lo cual, se recomienda trabajar con un diccionario solo de palabras como tokens. # Exercise 12.4\n\nPredict the sentiment using TfidfVectorizer.\n\nuse Random Forest classifier### Random forest con TfidfVectorizer eliminando stopwords#\n\nclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nvect4 = TfidfVectorizer(lowercase= True, stop_words='english') \nX_dtm4 = vect4.fit_transform(X) \nX_train4, X_test4, y_train4, y_test4 = train_test_split(X_dtm4, y, test_size=0.3, random_state=42)\nclf3.fit(X_train4, y_train4)\ny_pred4 = clf3.predict(X_test4)\nacc_tfidf_sinstw = metrics.accuracy_score(y_test4, y_pred4)\nacc_tfidf_sinstw\n### Random forest con TfidfVectorizer sin eliminar stopwordsclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nvect5 = TfidfVectorizer( lowercase= True) \nX_dtm5 = vect5.fit_transform(X) \nX_train5, X_test5, y_train5, y_test5 = train_test_split(X_dtm5, y, test_size=0.3, random_state=42)\nclf3.fit(X_train5, y_train5)\ny_pred5 = clf3.predict(X_test5)\nacc_tfidf_constw = metrics.accuracy_score(y_test5, y_pred5)\nacc_tfidf_constwEn cuanto a los modelos utilizando tfidvectorizer, el modelo que tiene un mejor desempeño es el randomforest sin eliminar stopwords, con un accuracy de 0,71. 
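Before iterating over n-gram sizes by hand in the next cells, it is worth noting that this kind of search over vectorizer settings can also be written more compactly with a scikit-learn `Pipeline` plus `GridSearchCV`. The sketch below is an alternative formulation, not the procedure used in this notebook; the parameter grid and `cv=3` are illustrative assumptions, and `X`, `y` are the text column and the encoded sentiment defined earlier.

from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier

# chain vectorizer and classifier so every parameter combination refits both steps
pipe = Pipeline([('vect', TfidfVectorizer(lowercase=True)),
                 ('clf', RandomForestClassifier(n_estimators=100, max_depth=100,
                                                random_state=123, n_jobs=-1))])
param_grid = {'vect__stop_words': [None, 'english'],   # keep vs. remove stopwords
              'vect__ngram_range': [(1, 1), (1, 2)]}   # unigrams vs. unigrams + bigrams
search = GridSearchCV(pipe, param_grid, scoring='accuracy', cv=3)
search.fit(X, y)
print(search.best_params_, search.best_score_)

One advantage of this formulation is that the vectorizer is refit only on the training part of each split, which avoids leaking vocabulary statistics from the validation folds.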
### Random forest con TfidfVectorizer eliminando stopwords, iterando enegramas#Sin la función stopwords\ngram = range(1,6)\nacc3=[]\nclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nfor i in gram:\n vect3 = TfidfVectorizer(ngram_range=(1, i), lowercase= True) \n X_dtm3 = vect3.fit_transform(X) \n X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42)\n clf3.fit(X_train3, y_train3)\n y_pred3 = clf3.predict(X_test3)\n acc_tfid_sinst = metrics.accuracy_score(y_test3, y_pred3)\n acc3.append([i,acc_tfid_sinst])\n\nacc3\n ### Random forest con TfidfVectorizer sin eliminar stopwords, iterando enegramas#Sin la función stopwords\ngram = range(1,6)\nacc4 = []\n\nclf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy')\n\nfor i in gram:\n vect3 = TfidfVectorizer(ngram_range=(1, i), lowercase= True, stop_words='english') \n X_dtm3 = vect3.fit_transform(X) \n X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42)\n clf3.fit(X_train3, y_train3)\n y_pred3 = clf3.predict(X_test3)\n acc_tfid_const = metrics.accuracy_score(y_test3, y_pred3)\n acc4.append([i, acc_tfid_const])\n\nacc4Al iterar el número enegramas en los modelos Random Forest con y sin stopwords, utilizando la función TfidfVectorizer para calcular la frecuencia relativa, se puede observar que el desempeño del modelo tiende a disminuir, a medida que aumenta el tamaño del enegrama. Por lo cual, se recomienda trabajar con un diccionario solo de palabras como tokens. ## Conclusiones\n#print('Modelo con countvectorizer sin eliminar stop words \\n',acc_vect_constpw, '\\n ')\n#print('Modelo con countvectorizer eliminando stop words \\n',acc_vect_sinstpw, '\\n ') \n\nprint('Modelo con countvectorizer sin eliminar stop words, iterando enegramas \\n',acc2, '\\n ')\nprint('Modelo con countvectorizer eliminando stop words, iterando enegramas \\n', acc1, '\\n ')\n\n#print('Modelo con TfidfVectorize sin eliminar stop words \\n', acc_tfidf_constw, '\\n ')\n#print('Modelo con TfidfVectorize sin eliminar stop words \\n', acc_tfidf_sinstw, '\\n ')\n\nprint('Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas \\n', acc3, '\\n ')\nprint('Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas \\n', acc4, '\\n ')\nModelo con countvectorizer sin eliminar stop words, iterando enegramas \n [[1, 0.7666211293260473], [2, 0.7331511839708561], [3, 0.7122040072859745], [4, 0.6933060109289617], [5, 0.6810109289617486]] \n \nModelo con countvectorizer eliminando stop words, iterando enegramas \n [[1, 0.7497723132969034], [2, 0.7106102003642987], [3, 0.6867030965391621], [4, 0.6755464480874317], [5, 0.6614298724954463]] \n \nModelo con TfidfVectorize sin eliminar stop words, iterando enegramas \n [[1, 0.7556921675774135], [2, 0.7290528233151184], [3, 0.7135701275045537], [4, 0.6996812386156649], [5, 0.6903460837887068]] \n \nModelo con TfidfVectorize sin eliminar stop words, iterando enegramas \n [[1, 0.7486338797814208], [2, 0.7090163934426229], [3, 0.6898907103825137], [4, 0.6687158469945356], [5, 0.660063752276867]] \n \n**El modelo de mejor desempeño para clasificar el sentimiento del tweet para las aerolíneas se da con la función countvectorizer con un accuracy del 76%, en este caso se debe tener en cuenta los stopwords y no se deben incluir enegramas mayores a 2.** \n\nAsímismo, se recomienda hacer una calibración de los 
parámetros del random forest, para obtener un mejor resultado de predicción. "},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/E12_SentimentPrediction VF.ipynb"},"repo_name":{"kind":"string","value":"angiepa2130/P3-MAAD-grupo-5"},"chain_length":{"kind":"number","value":19,"string":"19"}}},{"rowIdx":4880,"cells":{"content":{"kind":"string","value":"## Dependenciesimport random, os, warnings, math\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\nimport tensorflow as tf\nimport tensorflow.keras.layers as L\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import optimizers, losses, metrics, Model\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\nfrom transformers import TFAutoModelForSequenceClassification, TFAutoModel, AutoTokenizer\n\n\ndef seed_everything(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n os.environ['TF_DETERMINISTIC_OPS'] = '1'\n\nseed = 0\nseed_everything(seed)\nsns.set(style='whitegrid')\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_colwidth', 150)### Hardware configuration# TPU or GPU detection\n# Detect hardware, return appropriate distribution strategy\ntry:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n print(f'Running on TPU {tpu.master()}')\nexcept ValueError:\n tpu = None\n\nif tpu:\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.experimental.TPUStrategy(tpu)\nelse:\n strategy = tf.distribute.get_strategy()\n\nAUTO = tf.data.experimental.AUTOTUNE\nREPLICAS = strategy.num_replicas_in_sync\nprint(f'REPLICAS: {REPLICAS}')Running on TPU grpc://10.0.0.2:8470\nREPLICAS: 8\n# Load datatrain_filepath = '/kaggle/input/commonlitreadabilityprize/train.csv'\ntrain = pd.read_csv(train_filepath)\nprint(f'Train samples: {len(train)}')\ndisplay(train.head())\n# removing unused columns\ntrain.drop(['url_legal', 'license'], axis=1, inplace=True)Train samples: 2834\n# Model parametersBATCH_SIZE = 8 * REPLICAS\nLEARNING_RATE = 1e-5 * REPLICAS\nEPOCHS = 35\nES_PATIENCE = 10\nPATIENCE = 2\nN_FOLDS = 5\nN_USED_FOLDS = 1\nSEQ_LEN = 256\nBASE_MODEL = '/kaggle/input/huggingface-roberta/roberta-base/'## Auxiliary functions# Datasets utility functions\ndef custom_standardization(text):\n text = text.lower() # if encoder is uncased\n text = text.strip()\n return text\n\n\ndef sample_target(features, target):\n mean, stddev = target\n sampled_target = tf.random.normal([], mean=tf.cast(mean, dtype=tf.float32), \n stddev=tf.cast(stddev, dtype=tf.float32), dtype=tf.float32)\n \n return (features, sampled_target)\n \n\ndef get_dataset(pandas_df, tokenizer, labeled=True, ordered=False, repeated=False, \n is_sampled=False, batch_size=32, seq_len=128):\n \"\"\"\n Return a Tensorflow dataset ready for training or inference.\n \"\"\"\n text = [custom_standardization(text) for text in pandas_df['excerpt']]\n \n # Tokenize inputs\n tokenized_inputs = tokenizer(text, max_length=seq_len, truncation=True, \n padding='max_length', return_tensors='tf')\n \n if labeled:\n dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']}, \n (pandas_df['target'], pandas_df['standard_error'])))\n if is_sampled:\n 
dataset = dataset.map(sample_target, num_parallel_calls=tf.data.AUTOTUNE)\n else:\n dataset = tf.data.Dataset.from_tensor_slices({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']})\n \n if repeated:\n dataset = dataset.repeat()\n if not ordered:\n dataset = dataset.shuffle(1024)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n \n return dataset\n\n\ndef plot_metrics(history):\n metric_list = list(history.keys())\n size = len(metric_list)//2\n fig, axes = plt.subplots(size, 1, sharex='col', figsize=(20, size * 5))\n axes = axes.flatten()\n \n for index in range(len(metric_list)//2):\n metric_name = metric_list[index]\n val_metric_name = metric_list[index+size]\n axes[index].plot(history[metric_name], label='Train %s' % metric_name)\n axes[index].plot(history[val_metric_name], label='Validation %s' % metric_name)\n axes[index].legend(loc='best', fontsize=16)\n axes[index].set_title(metric_name)\n\n plt.xlabel('Epochs', fontsize=16)\n sns.despine()\n plt.show()# Modeldef model_fn(encoder, seq_len=256):\n input_ids = L.Input(shape=(seq_len,), dtype=tf.int32, name='input_ids')\n input_attention_mask = L.Input(shape=(seq_len,), dtype=tf.int32, name='attention_mask')\n \n outputs = encoder({'input_ids': input_ids, \n 'attention_mask': input_attention_mask})\n last_hidden_state = outputs['last_hidden_state']\n \n x = L.GlobalAveragePooling1D()(last_hidden_state)\n output = L.Dense(1, name='output')(x)\n \n model = Model(inputs=[input_ids, input_attention_mask], outputs=output)\n\n optimizer = optimizers.Adam(lr=LEARNING_RATE)\n model.compile(optimizer=optimizer, \n loss=losses.MeanSquaredError(), \n metrics=[metrics.RootMeanSquaredError()])\n \n return model\n\n\nwith strategy.scope():\n encoder = TFAutoModel.from_pretrained(BASE_MODEL)\n model = model_fn(encoder, SEQ_LEN)\n \nmodel.summary()Some layers from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/ were not used when initializing TFRobertaModel: ['lm_head']\n- This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nAll the layers of TFRobertaModel were initialized from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training.\n# Trainingtokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)\nskf = KFold(n_splits=N_FOLDS, shuffle=True, random_state=seed)\noof_pred = []; oof_labels = []; history_list = []\n\nfor fold,(idxT, idxV) in enumerate(skf.split(train)):\n if fold >= N_USED_FOLDS:\n break\n if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)\n print(f'\\nFOLD: {fold+1}')\n print(f'TRAIN: {len(idxT)} VALID: {len(idxV)}')\n\n # Model\n K.clear_session()\n with strategy.scope():\n encoder = TFAutoModel.from_pretrained(BASE_MODEL)\n model = model_fn(encoder, SEQ_LEN)\n \n model_path = f'model_{fold}.h5'\n es = EarlyStopping(monitor='val_root_mean_squared_error', mode='min', \n patience=ES_PATIENCE, restore_best_weights=True, verbose=1)\n checkpoint = ModelCheckpoint(model_path, monitor='val_root_mean_squared_error', mode='min', \n save_best_only=True, save_weights_only=True)\n\n # Train\n history = model.fit(x=get_dataset(train.loc[idxT], tokenizer, repeated=True, is_sampled=True, \n batch_size=BATCH_SIZE, seq_len=SEQ_LEN), \n validation_data=get_dataset(train.loc[idxV], tokenizer, ordered=True, \n batch_size=BATCH_SIZE, seq_len=SEQ_LEN), \n steps_per_epoch=50, \n callbacks=[es, checkpoint], \n epochs=EPOCHS, \n verbose=2).history\n \n history_list.append(history)\n # Save last model weights\n model.load_weights(model_path)\n \n # Results\n print(f\"#### FOLD {fold+1} OOF RMSE = {np.min(history['val_root_mean_squared_error']):.4f}\")\n\n # OOF predictions\n valid_ds = get_dataset(train.loc[idxV], tokenizer, ordered=True, batch_size=BATCH_SIZE, seq_len=SEQ_LEN)\n oof_labels.append([target[0].numpy() for sample, target in iter(valid_ds.unbatch())])\n x_oof = valid_ds.map(lambda sample, target: sample)\n oof_pred.append(model.predict(x_oof))\nFOLD: 1\nTRAIN: 2267 VALID: 567\n## Model loss and metrics graphfor fold, history in enumerate(history_list):\n print(f'\\nFOLD: {fold+1}')\n plot_metrics(history)\nFOLD: 1\n# Model evaluation\n\nWe are evaluating the model on the `OOF` predictions, it stands for `Out Of Fold`, since we are training using `K-Fold` our model will see all the data, and the correct way to evaluate each fold is by looking at the predictions that are not from that fold.\n\n## OOF metricsy_true = np.concatenate(oof_labels)\ny_preds = np.concatenate(oof_pred)\n\n\nfor fold, history in enumerate(history_list):\n print(f\"FOLD {fold+1} RMSE: {np.min(history['val_root_mean_squared_error']):.4f}\")\n \nprint(f'OOF RMSE: {mean_squared_error(y_true, y_preds, squared=False):.4f}')FOLD 1 RMSE: 0.5370\nOOF RMSE: 0.5370\n### **Error analysis**, label x prediction distribution\n\nHere we can compare the distribution from the labels and the predicted values, in a perfect scenario they should align.preds_df = pd.DataFrame({'Label': y_true, 'Prediction': y_preds[:,0]})\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 6))\nsns.distplot(preds_df['Label'], ax=ax, label='Label')\nsns.distplot(preds_df['Prediction'], ax=ax, 
label='Prediction')\nax.legend()\nplt.show()\nsns.jointplot(data=preds_df, x='Label', y='Prediction', kind='reg', height=10)\nplt.show()"},"license":{"kind":"string","value":"permissive"},"path":{"kind":"string","value":"/Model backlog/Train/7-commonlit-roberta-base-seq-256-sampling.ipynb"},"repo_name":{"kind":"string","value":"dimitreOliveira/CommonLit-Readability-Prize"},"chain_length":{"kind":"number","value":10,"string":"10"}}},{"rowIdx":4881,"cells":{"content":{"kind":"string","value":"diffs, co can z scale, hl can smartscale sigma2for label in ['diff_co','diff_hl']:\n my_df[label].plot.hist( bins=30, title=label )\n plt.show()\n\nnp.log10( my_df['diff_hl'] ).plot.hist( bins=30, title='log( diff_hl )' )\nplt.show()\nfoo, vals = rv.smart_scale( my_df, 'diff_co', n_sigma=8.0, show_final=True, curve_boost=4e4, return_coeff=True )\nprint foo.describe()\nfoo = rv.scale_column( my_df, 'diff_co' )\nfoo.plot.hist( bins=91 )\nplt.xlim(-5,5)\nplt.show()\nscale_dict['diff_co_mean'] = my_df['diff_co'].mean()\nscale_dict['diff_co_std' ] = my_df['diff_co'].std()\nmy_df['log_diff_hl'] = np.log10( my_df['diff_hl'] )\nfoo, vals = rv.smart_scale( my_df, 'log_diff_hl', n_sigma=2.0, show_final=True, curve_boost=4e4, return_coeff=True )\nscale_dict['log_diff_hl_mean'] = vals[0]\nscale_dict['log_diff_hl_std' ] = vals[1]\nprint foo.describe()Momentum, z scaling does wellfor i in mom_nums:\n label = 'momentum_'+str(i)\n my_df[label].plot.hist( bins=30, title=label )\n plt.show()\nfor i in mom_nums:\n label = 'momentum_'+str(i)\n foo = rv.scale_column(my_df,label)#smart_scale( my_df, label, n_sigma=10.0, show_final=True, curve_boost=1e4, return_coeff=True, )\n #vals[0] = foo.mean()\n #vals[1] = foo.std()\n #scale_dict[label+'_mean'] = my_df[label].mean()\n #scale_dict[label+'_std' ] = my_df[label].std()\n foo.plot.hist( bins=91 )\n plt.xlim(-5,5)\n plt.show()\n print foo.describe()\nfor i in mom_nums:\n label = 'momentum_'+str(i)\n ( (my_df[label]-0.0) / 0.1 ).plot.hist( bins=71, title=label )\n plt.xlim(-5,5)\n plt.show()\nscale_dict['momentum_mean'] = 0.0\nscale_dict['momentum_std' ] = 0.1RSI, do a modified zscaling. 
Center of distribution is clearly 0.5 by definition, using std of 0.2 for all will put in same window, and sort of resemble a normal distributionfor i in rsi_nums:\n label = 'rsi_'+str(i)\n my_df[label].plot.hist( bins=30, title=label )\n plt.show()\nfor i in rsi_nums:\n label = 'rsi_'+str(i)\n ( (my_df[label] - 0.5) / 0.2 ).plot.hist( bins=30, title=label )\n plt.show()\nfor i in rsi_nums:\n label = 'rsi_'+str(i)\n foo, vals = rv.smart_scale( my_df, label, n_sigma=2.0, show_final=True, curve_boost=1e4, return_coeff=True )\n #scale_dict[label+'_mean'] = vals[0]\n #scale_dict[label+'_std' ] = vals[1]\n print foo.describe()\nscale_dict['rsi_mean'] = 0.5\nscale_dict['rsi_std' ] = 0.2Bollinger bands, already centered at 0, use std of 0.65 for proper spreadfor i in band_nums:\n label = 'bollinger_'+str(i)\n my_df[label].plot.hist( bins=30, title=label )\n plt.show()\nfor i in band_nums:\n label = 'bollinger_'+str(i)\n #foo, vals = rv.smart_scale( my_df, label, n_sigma=2.0, show_final=True, curve_boost=1e4, return_coeff=True )\n #scale_dict[label+'_mean'] = vals[0]\n #scale_dict[label+'_std' ] = vals[1]\n #foo = rv.scale_column(my_df,label)#smart_scale( my_df, label, n_sigma=10.0, show_final=True, curve_boost=1e4, return_coeff=True, )\n #vals[0] = foo.mean()\n #vals[1] = foo.std()\n #scale_dict[label+'_mean'] = my_df[label].mean()\n #scale_dict[label+'_std' ] = my_df[label].std()\n foo = ( (my_df[label] - 0.0 ) / 0.65 )\n foo.plot.hist( bins=91 )\n plt.xlim(-5,5)\n plt.show()\n print my_df[label].mean()\n print my_df[label].std()\n print foo.describe()\nscale_dict['band_mean'] = 0.0\nscale_dict['band_std' ] = 0.65\nmy_df.columns.values\nprint my_df['frac_year_1'].describe()\nprint my_df['frac_year_2'].describe()\nmy_df['close'].plot.hist( bins=30 )\nprint scale_dict\nimport pickle\nwith open('quotes/scaling_dict.pkl','wb') as handle:\n pickle.dump( scale_dict, handle, protocol=pickle.HIGHEST_PROTOCOL )"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Scaling.ipynb"},"repo_name":{"kind":"string","value":"markertsean/python_trader"},"chain_length":{"kind":"number","value":4,"string":"4"}}},{"rowIdx":4882,"cells":{"content":{"kind":"string","value":"# install packages for Colab\n# install.packages(c(\"rsample\", \"caret\", \"vip\", \"h2o\", \"AmesHousing\", \"viridis\", \"broom\"))\n\n# Helper packages\nlibrary(dplyr) # for data manipulation\nlibrary(ggplot2) # for awesome graphics\nggplot2::theme_set(ggplot2::theme_light())\nlibrary(viridis)\nlibrary(broom)\n\n# Modeling process packages\nlibrary(rsample) # for resampling\nlibrary(caret) # for resampling and model training\nlibrary(h2o) # for resampling and model training\nh2o.no_progress() # turn off h2o progress bars\nh2o.init() # launch h2o\n\n# Model interpretability packages\nlibrary(vip) # variable importance\n\n# Ames housing data\nlibrary(AmesHousing)\names <- AmesHousing::make_ames()\names.h2o <- as.h2o(ames)\nAttaching package: ‘dplyr’\n\n\nThe following objects are masked from ‘package:stats’:\n\n filter, lag\n\n\nThe following objects are masked from ‘package:base’:\n\n intersect, setdiff, setequal, union\n\n\nLoading required package: viridisLite\n\nLoading required package: lattice\n\n\n----------------------------------------------------------------------\n\nYour next step is to start H2O:\n > h2o.init()\n\nFor H2O package documentation, ask for help:\n > ??h2o\n\nAfter starting H2O, you can use the Web UI at http://localhost:54321\nFor more information visit 
https://docs.h2o.ai\n\n----------------------------------------------------------------------\n\n\n\nAttaching package: ‘h2o’\n\n\nThe following objects are masked from ‘package:stats’:\n\n cor, sd, var\n\n\nThe following objects are masked from ‘package:base’:\n\n &&, %*%, %in%, ||, apply, as.factor, as.numeric, colnames,\n colnames<-, ifelse, is.character, is.factor, is.numeric, log,\n log10, log1p, log2, round, signif, trunc\n\n\n# Workflow using `rsample`## Simple linear regression### Estimation# stratified sampling\nset.seed(123)\nsplit <- initial_split(ames, prop = 0.7, strata = \"Sale_Price\")\names_train <- training(split)\names_test <- testing(split)\n# linear model with single predictor\nmodel1 <- lm(Sale_Price ~ Gr_Liv_Area, data = ames_train)\n# Fitted regression line (full training data)\np1 <- model1 %>%\n broom::augment() %>%\n ggplot(aes(Gr_Liv_Area, Sale_Price)) + \n geom_point(size = 1, alpha = 0.3) +\n geom_smooth(se = FALSE, method = \"lm\") +\n scale_y_continuous(labels = scales::dollar) +\n ggtitle(\"Fitted regression line\")\n\n# Fitted regression line (restricted range)\np2 <- model1 %>%\n broom::augment() %>%\n ggplot(aes(Gr_Liv_Area, Sale_Price)) + \n geom_segment(aes(x = Gr_Liv_Area, y = Sale_Price,\n xend = Gr_Liv_Area, yend = .fitted), \n alpha = 0.3) +\n geom_point(size = 1, alpha = 0.3) +\n geom_smooth(se = FALSE, method = \"lm\") +\n scale_y_continuous(labels = scales::dollar) +\n ggtitle(\"Fitted regression line (with residuals)\")\n\n# Side-by-side plots\ngrid.arrange(p1, p2, nrow = 1)\n\nsummary(model1)\nsigma(model1) #RMSE\nsigma(model1)^2 #MSE### Inferenceconfint(model1, level = 0.95)## Multiple linear regression(model2 <- lm(Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train))\n(model2 <- update(model1, . ~ . 
+ Year_Built))\nlm(Sale_Price ~ Gr_Liv_Area + Year_Built + Gr_Liv_Area:Year_Built, data = ames_train)\n# Fitted models\nfit1 <- lm(Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train)\nfit2 <- lm(Sale_Price ~ Gr_Liv_Area * Year_Built, data = ames_train)\n\n# Regression plane data\nplot_grid <- expand.grid(\n Gr_Liv_Area = seq(from = min(ames_train$Gr_Liv_Area), to = max(ames_train$Gr_Liv_Area), \n length = 100), \n Year_Built = seq(from = min(ames_train$Year_Built), to = max(ames_train$Year_Built), \n length = 100)\n)\nplot_grid$y1 <- predict(fit1, newdata = plot_grid)\nplot_grid$y2 <- predict(fit2, newdata = plot_grid)\n\n# Level plots\np1 <- ggplot(plot_grid, aes(x = Gr_Liv_Area, y = Year_Built, \n z = y1, fill = y1)) +\n geom_tile() +\n geom_contour(color = \"white\") +\n viridis::scale_fill_viridis(name = \"Predicted\\nvalue\", option = \"inferno\") +\n theme_bw() +\n ggtitle(\"Main effects only\")\np2 <- ggplot(plot_grid, aes(x = Gr_Liv_Area, y = Year_Built, \n z = y2, fill = y1)) +\n geom_tile() +\n geom_contour(color = \"white\") +\n viridis::scale_fill_viridis(name = \"Predicted\\nvalue\", option = \"inferno\") +\n theme_bw() +\n ggtitle(\"Main effects with two-way interaction\")\n\ngridExtra::grid.arrange(p1, p2, nrow = 1)\ninstall.packages(\"broom\")\nlibrary(broom)\n# include all possible main effects\nmodel3 <- lm(Sale_Price ~ ., data = ames_train) \n\n# print estimated coefficients in a tidy data frame\nbroom::tidy(model3) ## Assessing model accuracy# Train model using 10-fold cross-validation\nset.seed(123) # for reproducibility\n(cv_model1 <- train(\n form = Sale_Price ~ Gr_Liv_Area, \n data = ames_train, \n method = \"lm\",\n trControl = trainControl(method = \"cv\", number = 10)\n))\n# model 2 CV\nset.seed(123)\ncv_model2 <- train(\n Sale_Price ~ Gr_Liv_Area + Year_Built, \n data = ames_train, \n method = \"lm\",\n trControl = trainControl(method = \"cv\", number = 10)\n)\n\n# model 3 CV\nset.seed(123)\ncv_model3 <- train(\n Sale_Price ~ ., \n data = ames_train, \n method = \"lm\",\n trControl = trainControl(method = \"cv\", number = 10)\n)\n\n# Extract out of sample performance measures\nsummary(resamples(list(\n model1 = cv_model1, \n model2 = cv_model2, \n model3 = cv_model3\n)))Warning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFit, newdata):\n“prediction from a rank-deficient fit may be misleading”\nWarning message in predict.lm(modelFi[...]## Model concernsp1 <- ggplot(ames_train, aes(Year_Built, Sale_Price)) + \n geom_point(size = 1, alpha = .4) +\n geom_smooth(se = FALSE) +\n scale_y_continuous(\"Sale price\", labels = scales::dollar) +\n xlab(\"Year built\") +\n ggtitle(paste(\"Non-transformed variables with a\\n\",\n 
\"non-linear relationship.\"))\n\np2 <- ggplot(ames_train, aes(Year_Built, Sale_Price)) + \n geom_point(size = 1, alpha = .4) + \n geom_smooth(method = \"lm\", se = FALSE) +\n scale_y_log10(\"Sale price\", labels = scales::dollar, \n breaks = seq(0, 400000, by = 100000)) +\n xlab(\"Year built\") +\n ggtitle(paste(\"Transforming variables can provide a\\n\",\n \"near-linear relationship.\"))\n\ngridExtra::grid.arrange(p1, p2, nrow = 1)\ndf1 <- broom::augment(cv_model1$finalModel, data = ames_train)\n\np1 <- ggplot(df1, aes(.fitted, .resid)) + \n geom_point(size = 1, alpha = .4) +\n xlab(\"Predicted values\") +\n ylab(\"Residuals\") +\n ggtitle(\"Model 1\", subtitle = \"Sale_Price ~ Gr_Liv_Area\")\n\ndf2 <- broom::augment(cv_model3$finalModel, data = ames_train)\n\np2 <- ggplot(df2, aes(.fitted, .resid)) + \n geom_point(size = 1, alpha = .4) +\n xlab(\"Predicted values\") +\n ylab(\"Residuals\") +\n ggtitle(\"Model 3\", subtitle = \"Sale_Price ~ .\")\n\ngridExtra::grid.arrange(p1, p2, nrow = 1)"},"license":{"kind":"string","value":"permissive"},"path":{"kind":"string","value":"/ames-housing/homlr-chapter-4-linear-regression.ipynb"},"repo_name":{"kind":"string","value":"YoYo1971/discover-projects"},"chain_length":{"kind":"number","value":6,"string":"6"}}},{"rowIdx":4883,"cells":{"content":{"kind":"string","value":"# Regression Week 4: Ridge Regression (interpretation)In this notebook, we will run ridge regression multiple times with different L2 penalties to see which one produces the best fit. We will revisit the example of polynomial regression as a means to see the effect of L2 regularization. In particular, we will:\n* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression\n* Use matplotlib to visualize polynomial regressions\n* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression, this time with L2 penalty\n* Use matplotlib to visualize polynomial regressions under L2 regularization\n* Choose best L2 penalty using cross-validation.\n* Assess the final fit using test data.\n\nWe will continue to use the House data from previous notebooks. (In the next programming assignment for this module, you will implement your own ridge regression learning algorithm using gradient descent.)# Fire up graphlab createimport graphlab/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning.\n SNIMissingWarning\n/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.\n InsecurePlatformWarning\n[WARNING] Unable to write current GraphLab Create license to /home/kuntal/.graphlab/config. Ensure that this user account has write permiss[...]# Polynomial regression, revisitedWe build on the material from Week 3, where we wrote the function to produce an SFrame with columns containing the powers of a given input. 
Copy and paste the function `polynomial_sframe` from Week 3:def polynomial_sframe(feature, degree):\n # assume that degree >= 1\n # initialize the SFrame:\n poly_sframe = graphlab.SFrame()\n # and set poly_sframe['power_1'] equal to the passed feature\n poly_sframe['power_1'] = feature\n # first check if degree > 1\n if degree > 1:\n # then loop over the remaining degrees:\n # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree\n for power in range(2, degree+1): \n # first we'll give the column a name:\n name = 'power_' + str(power)\n # then assign poly_sframe[name] to the appropriate power of feature\n poly_sframe[name] = feature**power\n return poly_sframe\n Let's use matplotlib to visualize what a polynomial regression looks like on the house data.import matplotlib.pyplot as plt\n%matplotlib inline\nsales = graphlab.SFrame('kc_house_data.gl/')As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.sales = sales.sort(['sqft_living','price'])Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using `polynomial_sframe()` and fit a model with these features. When fitting the model, use an L2 penalty of `1e-5`:l2_small_penalty = 1e-5Note: When we have so many features and so few data points, the solution can become highly numerically unstable, which can sometimes lead to strange unpredictable results. Thus, rather than using no regularization, we will introduce a tiny amount of regularization (`l2_penalty=1e-5`) to make the solution numerically stable. (In lecture, we discussed the fact that regularization can also help with numerical stability, and here we are seeing a practical example.)\n\nWith the L2 penalty specified above, fit the model and print out the learned weights.\n\nHint: make sure to add 'price' column to the new SFrame before calling `graphlab.linear_regression.create()`. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set=None` in this call.poly1_data = polynomial_sframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = sales['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, validation_set = None, l2_penalty=l2_small_penalty)\nmodel1.get(\"coefficients\")***QUIZ QUESTION: What's the learned value for the coefficient of feature `power_1`?***# Observe overfittingRecall from Week 3 that the polynomial fit of degree 15 changed wildly whenever the data changed. In particular, when we split the sales data into four subsets and fit the model of degree 15, the result came out to be very different for each subset. The model had a *high variance*. We will see in a moment that ridge regression reduces such variance. But first, we must reproduce the experiment we did in Week 3.First, split the data into split the sales data into four subsets of roughly equal size and call them `set_1`, `set_2`, `set_3`, and `set_4`. Use `.random_split` function and make sure you set `seed=0`. 
(semi_split1, semi_split2) = sales.random_split(.5,seed=0)\n(set_1, set_2) = semi_split1.random_split(0.5, seed=0)\n(set_3, set_4) = semi_split2.random_split(0.5, seed=0)Next, fit a 15th degree polynomial on `set_1`, `set_2`, `set_3`, and `set_4`, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.\n\nHint: When calling `graphlab.linear_regression.create()`, use the same L2 penalty as before (i.e. `l2_small_penalty`). Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.poly1_data = polynomial_sframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_1['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_2['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_3['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_4['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty)\nmodel1.get(\"coefficients\")The four curves should differ from one another a lot, as should the coefficients you learned.\n\n***QUIZ QUESTION: For the models learned in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered \"smaller\" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)# Ridge regression comes to rescueGenerally, whenever we see weights change so much in response to change in data, we believe the variance of our estimate to be large. Ridge regression aims to address this issue by penalizing \"large\" weights. (Weights of `model15` looked quite small, but they are not that small because 'sqft_living' input is in the order of thousands.)\n\nWith the argument `l2_penalty=1e5`, fit a 15th-order polynomial model on `set_1`, `set_2`, `set_3`, and `set_4`. Other than the change in the `l2_penalty` parameter, the code should be the same as the experiment above. 
Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.poly1_data = polynomial_sframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_1['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_2['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_3['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5)\nmodel1.get(\"coefficients\")\npoly1_data = polynomial_sframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \npoly1_data['price'] = set_4['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5)\nmodel1.get(\"coefficients\")These curves should vary a lot less, now that you applied a high degree of regularization.\n\n***QUIZ QUESTION: For the models learned with the high level of regularization in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered \"smaller\" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)# Selecting an L2 penalty via cross-validationJust like the polynomial degree, the L2 penalty is a \"magic\" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. **Cross-validation** seeks to overcome this issue by using all of the training set in a smart way.\n\nWe will implement a kind of cross-validation called **k-fold cross-validation**. The method gets its name because it involves dividing the training set into k segments of roughtly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows:\n\nSet aside segment 0 as the validation set, and fit a model on rest of data, and evalutate it on this validation set\nSet aside segment 1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set\n...\nSet aside segment k-1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set\n\nAfter this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data. 
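In symbols, if $\mathrm{err}_i$ denotes the validation error obtained when segment $i$ is held out, the cross-validation estimate described above is simply the average

$$\mathrm{CV}_k = \frac{1}{k}\sum_{i=0}^{k-1} \mathrm{err}_i,$$

where, in the implementation further below, each $\mathrm{err}_i$ is measured as the residual sum of squares on the held-out segment. (This formula only restates the procedure in the text as a compact summary.)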
\n\nTo estimate the generalization error well, it is crucial to shuffle the training data before dividing them into segments. GraphLab Create has a utility function for shuffling a given SFrame. We reserve 10% of the data as the test set and shuffle the remainder. (Make sure to use `seed=1` to get consistent answer.)(train_valid, test) = sales.random_split(.9, seed=1)\ntrain_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)Once the data is shuffled, we divide it into equal segments. Each segment should receive `n/k` elements, where `n` is the number of observations in the training set and `k` is the number of segments. Since the segment 0 starts at index 0 and contains `n/k` elements, it ends at index `(n/k)-1`. The segment 1 starts where the segment 0 left off, at index `(n/k)`. With `n/k` elements, the segment 1 ends at index `(n*2/k)-1`. Continuing in this fashion, we deduce that the segment `i` starts at index `(n*i/k)` and ends at `(n*(i+1)/k)-1`.With this pattern in mind, we write a short loop that prints the starting and ending indices of each segment, just to make sure you are getting the splits right.n = len(train_valid_shuffled)\nk = 10 # 10-fold cross-validation\n\nfor i in xrange(k):\n start = (n*i)/k\n end = (n*(i+1))/k-1\n print i, (start, end)0 (0, 1938)\n1 (1939, 3878)\n2 (3879, 5817)\n3 (5818, 7757)\n4 (7758, 9697)\n5 (9698, 11636)\n6 (11637, 13576)\n7 (13577, 15515)\n8 (15516, 17455)\n9 (17456, 19395)\nLet us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of `train_valid_shuffled`. Notice that the first index (0) is included in the slice but the last index (10) is omitted.train_valid_shuffled[0:10] # rows 0 to 9Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the `train_valid_shuffled` dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above.\nExtract the fourth segment (segment 3) and assign it to a variable called `validation4`.validation4=train_valid_shuffled[5818:7758]To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.print int(round(validation4['price'].mean(), 0))536234\nAfter designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0:start) and (end+1:n) of the data and paste them together. SFrame has `append()` method that pastes together two disjoint sets of rows originating from a common dataset. 
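In other words, the remainder for a generic segment can be built with a single slice-and-append expression; the sketch below assumes the `start` and `end` indices computed above for some segment, and the name `train_i` is ours.# everything before the validation segment, followed by everything after it\ntrain_i = train_valid_shuffled[0:start].append(train_valid_shuffled[end + 1:n])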
For instance, the following cell pastes together the first and last two rows of the `train_valid_shuffled` dataframe.n = len(train_valid_shuffled)\nfirst_two = train_valid_shuffled[0:2]\nlast_two = train_valid_shuffled[n-2:n]\nprint first_two.append(last_two)+------------+---------------------------+-----------+----------+-----------+\n| id | date | price | bedrooms | bathrooms |\n+------------+---------------------------+-----------+----------+-----------+\n| 2780400035 | 2014-05-05 00:00:00+00:00 | 665000.0 | 4.0 | 2.5 |\n| 1703050500 | 2015-03-21 00:00:00+00:00 | 645000.0 | 3.0 | 2.5 |\n| 4139480190 | 2014-09-16 00:00:00+00:00 | 1153000.0 | 3.0 | 3.25 |\n| 7237300290 | 2015-03-26 00:00:00+00:00 | 338000.0 | 5.0 | 2.5 |\n+------------+---------------------------+-----------+----------+-----------+\n+-------------+----------+--------+------------+------+-----------+-------+------------+\n| sqft_living | sqft_lot | floors | waterfront | view | condition | grade | sqft_above |\n+-------------+----------+--------+------------+------+-----------+-------+------------+\n| 2800.0 | 5900 | 1 | 0 | 0 | 3 | 8 | 1660 |\n| 2490.0 | 59[...]Extract the remainder of the data after *excluding* fourth segment (segment 3) and assign the subset to `train4`.train4=train_valid_shuffled[0:5818].append(train_valid_shuffled[7758:19396])To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.print int(round(train4['price'].mean(), 0))539450\nNow we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) `k`, (ii) `l2_penalty`, (iii) dataframe, (iv) name of output column (e.g. `price`) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.\n\n* For each i in [0, 1, ..., k-1]:\n * Compute starting and ending indices of segment i and call 'start' and 'end'\n * Form validation set by taking a slice (start:end+1) from the data.\n * Form training set by appending slice (end+1:n) to the end of slice (0:start).\n * Train a linear model using training set just formed, with a given l2_penalty\n * Compute validation error using validation set just formeddef get_RSS(prediction, output):\n residual = output - prediction\n # square the residuals and add them up\n RS = residual*residual\n RSS = RS.sum()\n return(RSS)\n\ndef k_fold_cross_validation(k, l2_penalty, data, output_name, features_list):\n n=len(data)\n RSS = 0\n for i in range(0,k-1):\n start=(n*i)/k\n end=(n*(i+1))/k-1\n validation=data[start:end+1]\n training=data[0:start].append(data[end+1:n])\n model=graphlab.linear_regression.create(training,target=output_name,features = features_list, l2_penalty=l2_penalty,validation_set=None,verbose = False)\n prediction=model.predict(validation)\n rss=get_RSS(prediction, validation[output_name])\n RSS=RSS+rss\n \n value_err=RSS/k\n return value_err\n \n \n Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. 
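One caveat about the implementation shown above: `for i in range(0,k-1)` stops at segment `k-2`, so the last segment is never used for validation even though the accumulated error is still divided by `k`. A corrected loop header (sketch) visits all `k` segments:# range(0, k) yields i = 0, 1, ..., k-1, so every segment is validated on exactly once\nfor i in range(0, k):\n    start = (n * i) / k\n    end = (n * (i + 1)) / k - 1\n    # train on data[0:start].append(data[end+1:n]) and validate on data[start:end+1], as above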
Write a loop that does the following:\n* We will again be aiming to fit a 15th-order polynomial model using the `sqft_living` input\n* For `l2_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, you can use this Numpy function: `np.logspace(1, 7, num=13)`.)\n * Run 10-fold cross-validation with `l2_penalty`\n* Report which L2 penalty produced the lowest average validation error.\n\nNote: since the degree of the polynomial is now fixed to 15, to make things faster, you should generate polynomial features in advance and re-use them throughout the loop. Make sure to use `train_valid_shuffled` when generating polynomial features!import numpy as np\n\npoly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)\n\nmy_features = poly_data.column_names()\n\npoly_data['price'] = train_valid_shuffled['price']\n\noutput_name='price'\n\nfor l2_penalty in np.logspace(1, 7, num=13):\n\n Val_err = k_fold_cross_validation(10, l2_penalty, poly_data,output_name, my_features)\n print (l2_penalty,Val_err)\n (10.0, 476529406003612.0)\n(31.622776601683793, 273938216651549.56)\n(100.0, 147909441657361.3)\n(316.22776601683796, 109066503581727.88)\n(1000.0, 108042622266425.23)\n(3162.2776601683795, 110458360712246.33)\n(10000.0, 121981388561659.53)\n(31622.776601683792, 153321500552248.84)\n(100000.0, 205811306546692.06)\n(316227.76601683791, 225313137467116.16)\n(1000000.0, 228808655773395.7)\n(3162277.6601683795, 231991693540554.44)\n(10000000.0, 233720728134190.1)\n***QUIZ QUESTIONS: What is the best value for the L2 penalty according to 10-fold validation?***You may find it useful to plot the k-fold cross-validation errors you have obtained to better understand the behavior of the method. # Plot the l2_penalty values in the x axis and the cross-validation error in the y axis.\n# Using plt.xscale('log') will make your plot more intuitive.\n\nOnce you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of `l2_penalty`. This way, your final model will be trained on the entire dataset.poly1_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15) # use equivalent of `polynomial_sframe`\nmy_features = poly1_data.column_names() \noutput_name='price'\npoly1_data['price'] = train_valid_shuffled['price']\nmodel1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1000)\nVal_err = k_fold_cross_validation(10, 1000, poly1_data,output_name, my_features)\nprint Val_err1.08042622266e+14\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Machine Learning-Specialization/Regression/Week-4/One/week-4-ridge-regression-assignment-1-blank.ipynb"},"repo_name":{"kind":"string","value":"Kuntal-G/MOOC-Courses"},"chain_length":{"kind":"number","value":21,"string":"21"}}},{"rowIdx":4884,"cells":{"content":{"kind":"string","value":"# Project: Investigate a Dataset (TMDB movie data)\n\n## Table of Contents\n\nIntroduction\nData Wrangling\nExploratory Data Analysis\nConclusions\n\n## Introduction\n\nFor the Udacity Data Analyst Nano Degree project 2: investiagte a dataset, i have choosen the TMDB dataset out of the 5 dataset given. This data was originated from Kaggle, originally sourced from IMDB. 
In this project i will be cleaning and exploring the dataset, where questions below will be explored using some of the python tools learned from the class sessions, at the end i should be able to make sense of this data and answer these questions raised.\n\n\n__Questions:__\n\nwhich year most of the movies were released?What are the High Budget Movies from year to year?what is the runtime, popularity and budget trends over the years?import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n% matplotlib inline\n## Data Wrangling\n\nAfter observing the dataset that it is relatively clean and proposed questions for the analysis i will be keeping only relevent data, deleting data i dont need so that i can make the data easy and understandable.\n\n### General Properties# Load your data and print out a few lines. Perform operations to inspect data\ndf = pd.read_csv('tmdb_movies.csv')\ndf.head()\nAn initial view of the data, its headings, rows and columsdf.nunique()\ndf.info()\n# a view of datatypes for str stored as object.\nprint(\"I am imdb_id: \", type(df['imdb_id'][0]))\nprint(\"I am original_title: \", type(df['original_title'][0]))\nprint(\"I am cast: \", type(df['cast'][0]))\nprint(\"I am homepage: \", type(df['homepage'][0]))\nprint(\"I am director: \", type(df['director'][0]))\nprint(\"I am tagline: \", type(df['tagline'][0]))\nprint(\"I am keywords: \", type(df['keywords'][0]))\nprint(\"I am overview: \", type(df['overview'][0]))\nprint(\"I am genres: \", type(df['genres'][0]))\nprint(\"I am production_companies: \", type(df['production_companies'][0]))\nprint(\"I am release_date: \", type(df['release_date'][0]))I am imdb_id: \nI am original_title: \nI am cast: \nI am homepage: \nI am director: \nI am tagline: \nI am keywords: \nI am overview: \nI am genres: \nI am production_companies: \nI am release_date: \nA confirmation of the datatypes for all columns described above as 'object'.null_check = df.loc[:,['id','budget','revenue','popularity','release_year','director','release_date']].sort_values(by =['budget'], ascending=True)\nnull_check.head(9000)\n#identifying duplicated rows\nsum(df.duplicated())\ndf['is_duplicate_id'] = df.duplicated(['id'])\ndf_dupe_id_filter = df[df['is_duplicate_id'] == True]\ndf_dupe_id_filter.head()\ndf_id_check_dupe = df[df['id'] == 42194]\ndf_id_check_dupe.head()\n#drop duplicated ID row\ndf.drop_duplicates(subset=['id'],inplace=True)> **Tip**: You should _not_ perform too many operations in each cell. Create cells freely to explore your data. One option that you can take with this project is to do a lot of explorations in an initial notebook. These don't have to be organized, but make sure you use enough comments to understand the purpose of each code cell. Then, after you're done with your analysis, create a duplicate notebook where you will trim the excess and organize your steps so that you have a flowing, cohesive report.\n\n> **Tip**: Make sure that you keep your reader informed on the steps that you are taking in your investigation. Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). 
Try to make it so that the reader can then understand what they will be seeing in the following cell(s).\n\n### Data Cleaning (Replace this with more specific notes!)# After discussing the structure of the data and any problems that need to be\n# cleaned, perform those cleaning steps in the second part of this section.# Drop columns:In this section, I've decided to drop columns that are extraneous to questions i would explore:1. imdb_id: this appears to relate to the previous IMDB data. Assumption is that this was left in by Kaggle to map the IMDB and TMDB ids together2. budget and revenue: since budget_adj and revenue_adj have already been normalised to 2010 levels for more direct comparision, these two columns are no longer required3. homepage, tagline, overview and keywords: seem unnecessary to include this for the type of intended analysis\n4. is_duplicate_title: is no longer necessarydf.drop(['imdb_id', 'budget', 'revenue', 'homepage', 'tagline', 'overview', 'keywords', 'is_duplicate_id'], axis=1, inplace=True)\ndf.head()#### updating datatypedf['release_date'] = pd.to_datetime(df['release_date'])\n\n# check it's worked\ntype(df['release_date'][0])\n## Exploratory Data Analysis\n\n> **Tip**: Now that you've trimmed and cleaned your data, you're ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables.\n\n### Question 1: which year most of the movies were released?df['release_year'].value_counts()[0:10]\nThe year with most movies released from the data was year 2014 with 700 movies followed by year 2013, 2015 with 659 and 629 respectively.### Question 2: What are the High Budget Movies from year to year?def sort_by_budget(df):\n return df.sort_values(by = 'budget_adj',ascending = False)['original_title'].head(1)\ndf.groupby('release_year').apply(sort_by_budget)\ndf_movies = df.copy()\ndf_movies.info()\ndf_explore = df_movies.groupby('release_year').mean()\n\ndf_explore.hist(figsize=(12, 16));\ndf_explore['runtime'].hist()\nplt.xlabel('Runtime')\nplt.title('Runtime Over the Years');\ndf_explore['runtime'].describe() 1. As seen in the plots and functions above, popular runtimes over the years are between 104 and 107 minutes.\n 2. The distribution is right skewed.df_explore['popularity'].hist()\nplt.xlabel('Popularity')\nplt.title('Popularity Over the Years');\n\ndf_explore['popularity'].describe() 1.As evident from histogram and quartile percentages, maximum ratings received fall in the 0.47 to 0.62 ranges. 2.The distribution is skewed to right. 3.It is observed that no values lie in the range 0.73 to 0.89. This needs further scrutiny.df_explore['revenue'].hist()\nplt.xlabel('revenue')\nplt.title('Revenue Over the Years');\ndf_explore['revenue'].describe() 1. the distribution is skewed to the left. 2. Most movie revenues fall in the 3.257984e+07 to 4.293171e+07 ranges. `\n## Conclusions:At the end of my cleaning and explorations;\n1. Year 2014 had the highest number of release movies.\n2. The same year 2014 The Hobbit: the battle of five armies had the most budget.\n3. popular runtimes over the years are between 104 and 107 minutes.\n4. Most movie revenues fall in the 3.257984e+07 to 4.293171e+07 ranges.\nREFRENCES:\n 1. https://pandas.pydata.org/pandas-docs/stable/reference/frame.html\n \n 2. 
https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/\n \n 3. https://developers.themoviedb.org/3/getting-started/popularity\n \n 4. https://prvnirupama.wordpress.com/2017/11/30/10-lessons-from-investigate-imdb-dataset/\n \n 5. https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/\n## Submitting your Project \n\n> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).\n\n> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.\n\n> Once you've done this, you can submit your project by clicking on the \"Submit Project\" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb'])"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Udacity Data Analyst Projects/project 2/Investigate_a_Dataset.ipynb"},"repo_name":{"kind":"string","value":"bakut/Udacity-Data-Analyst-Nano-Degree-projects"},"chain_length":{"kind":"number","value":13,"string":"13"}}},{"rowIdx":4885,"cells":{"content":{"kind":"string","value":"Table of Contents\n%matplotlib inline\n%load_ext autoreload\n%autoreload 4\n%autosave 120\nfrom fastai.io import *\nfrom fastai.structured import *\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom pandas_summary import DataFrameSummary\nfrom IPython.display import display\nfrom sklearn import metrics\nimport feather\nhist_trans = feather.read_dataframe('hist_trans')\nnew_hist_trans = feather.read_dataframe('new_hist_trans')\nDataFrameSummary(new_hist_trans).summary().T\ntemp1 = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_05042ebd55'])]\ntemp2 = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_5c240d6e3c'])]\ntemp = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_05042ebd55', 'C_ID_5c240d6e3c'])]\ntemp1['card_id'] = temp1.card_id.astype('category')\ntemp1['merchant_id'] = temp1.merchant_id.astype('category')\ntemp1.sort_values('purchase_date', inplace=True)\nla = temp1.groupby('card_id').rolling('30D', on='purchase_date')['purchase_amount'].sum().reset_index()\nla\ntemp1.drop('month_rolling', axis=1, inplace=True)\npd.merge(temp1, la, 'inner', on='purchase_date', suffixes=['_x', '_rolling_amount'])\nla = temp1.groupby(['card_id', 'month_diff'])['purchase_amount'].sum().reset_index()\nla\nla.groupby('card_id').agg(['mean', 'max', 'min'])\ndef monthly_rolling(df, fe1, fe2):\n temp_df = df.sort_values('purchase_date')\n temp_df2 = temp_df.groupby('card_id').rolling('30D', on='purchase_date')[fe1].sum().reset_index()\n temp_df2 = temp_df2[['purchase_date', 'fe1']]\n return pd.merge\ntemp1\ntemp1.sort_values('purchase_date').groupby('card_id').rolling('30D', on='purchase_date')['installments'].sum()\ntemp1.sort_values('purchase_date').groupby('card_id')['purchase_date'].diff()\ntemp1.sort_values('purchase_date').T\ndef aggregate_per_month(history):\n grouped = history.groupby(['card_id', 'subsector_id'])['purchase_amount']\n\n agg_func 
= {\n 'purchase_amount': ['count', 'sum', 'max', 'mean']\n }\n\n intermediate_group = grouped.agg(agg_func)\n intermediate_group.columns = ['_'.join(col).strip() for col in intermediate_group.columns.values]\n intermediate_group.reset_index(inplace=True)\n\n final_group = intermediate_group.groupby('card_id').agg(['mean', 'sum', np.ptp, 'max'])\n final_group.columns = ['_'.join(col).strip() for col in final_group.columns.values]\n final_group.reset_index(inplace=True)\n \n return final_group\nla.groupby('card_id').agg(['mean', 'max', 'min', 'sum'])\ntemp1.T\naggregate_per_month(temp1).T\naggregate_per_month(temp).T\ndef successive_aggregates(df, field1, field2):\n t = df.groupby(['card_id', field1])[field2].mean()\n u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'min', 'max', 'std'])\n u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values]\n u.reset_index(inplace=True)\n return u\nsuccessive_aggregates(temp, 'authorized_flag', 'purchase_amount').T\nsuccessive_aggregates(temp, 'category_1', 'purchase_amount').T\ndef get_cat_agg(df):\n agg_df = agg_on_cat(df, 'category_1', 'purchase_amount')\n agg_df = pd.merge(agg_df, agg_on_cat(df, 'category_2', 'purchase_amount'), on='card_id', how='left')\n agg_df = pd.merge(agg_df, agg_on_cat(df, 'category_3', 'purchase_amount'), on='card_id', how='left')\n agg_df = pd.merge(agg_df, agg_on_cat(df, 'authorized_flag', 'purchase_amount'), on='card_id', how='left')\n return agg_df\ndef agg_on_cat(df, category, feature):\n temp_df = df.pivot_table(index='card_id', columns=category, aggfunc={feature: ['sum', 'mean']})\n cols = [category + '_{0[2]}_{0[0]}_{0[1]}'.format(col) for col in temp_df.columns.tolist()]\n temp_df.columns = cols\n return temp_df\nget_cat_agg(temp)\ndef successive_aggregates(df, field1, field2):\n t = df.groupby(['card_id', field1])[field2].mean()\n u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'max', np.ptp, 'sum'])\n u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values]\n u.reset_index(inplace=True)\n return u\nsuccessive_aggregates(temp1, 'state_id', 'purchase_amount').T\nsuccessive_aggregates(temp1, 'subsector_id', 'purchase_amount').T\naggregate_per_month(temp1).T\ntemp1.T\ndef percentile(n):\n def percentile_(x):\n return x.quantile(0.5)\n percentile_.__name__ = 'percentile_{:2.0f}'.format(n*100)\n return percentile_\ndef aggregate_new_trans(df):\n\n aggs = {}\n# aggs['purchase_amount'] = ['sum','max','min','mean','median', percentile(80), percentile(20), percentile(75), percentile(25)]\n aggs['purchase_amount'] = [('sum', 'sum'), ('pct_75', lambda x: np.percentile(x, q = 75)), \n ('pct_25', lambda x: np.percentile(x, q = 25)), ('mean', 'mean'), \n ('median', 'median'), ('max', 'max'), ('min', 'min'), ('var', 'var'), \n ('skew', 'skew')]\n \n new_df = df.groupby(['card_id']).agg(aggs)\n\n new_df.columns = ['_'.join(col).strip() for col in new_df.columns.values]\n new_df.reset_index(inplace=True)\n other_df = (df.groupby('card_id')\n .size()\n .reset_index(name='transactions_count'))\n \n new_df = pd.merge(other_df, new_df, on='card_id', how='left')\n return new_df\naggregate_new_trans(temp1)\ntemp1['purchase_amount'].describe( percentiles = [ 0.25, 0.75 ] )\ntemp1.T\ntemp1.groupby(['card_id'])['purchase_amount'].quantile(.2)\ntemp"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/scrap pad 
2.ipynb"},"repo_name":{"kind":"string","value":"mukeshpilaniya/kaggle"},"chain_length":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":4886,"cells":{"content":{"kind":"string","value":"#### STAMP scan of CATH domains and structural alignments\nUsing domains defined in CATH and STAMP at different similarity levels/thresholds for generating structural alignments. Generating STAMP structural alignments for CATH superfamilies (structural groups and funfams) as they are provided by CATH.%run stamptools.py -h\n# before running STAMP for all CATH superfamilies\n# generated a superfamilies_analysis.txt summary with number of domains and SCGs/FFs\n%run cathtools.py -h\n# generates superfamilies_analysis.txt\n# updated to generate also a superfamilies_ssg_analysis.txt\n!qrsh -cwd -V pypy cathtools.py -a -l cath.log\n# sort by number of domains (column 3)\n!sort -n -k 3 ~/NOBACK/DB/superfamilies_analysis.txt > ~/NOBACK/DB/superfamilies_analysis_sorted.txt\n# sort by number of domains (column 4)\n!sort -n -k 4 ~/NOBACK/DB/superfamilies_analysis_ssg.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_sorted.txt\n# sort by number of domains (column 4)\n!sort -n -k 4 ~/NOBACK/DB/superfamilies_analysis_ff.txt > ~/NOBACK/DB/superfamilies_analysis_ff_sorted.txt\n# running STAMP align for a particular superfamily (number 12)\n# Note that this method generates all the STAMP files necessary for initial scan\n# then runs STAMP and generates alignments and pdbs (superimpositions) for all\n# levels in the tree (treewise)\n%run stamptools.py -a 'ssg' -i '2.60.40.1520_1' -l align.log\n# same but on the cluster\n!qrsh -cwd -V python2.7 stamptools.py -a 'ssg' -i '2.60.40.1520_1' -l align.log\n# generating a list of superfamily_fam ids \n# if length > 1:\n!qrsh -cwd -V python2.7 stamptools.py -a 'ssg' -l stamp.log > ~/NOBACK/DB/superfamilies_ssg.txt\n!qrsh -cwd -V python2.7 stamptools.py -a 'ff' -l stamp.log > ~/NOBACK/DB/superfamilies_ff.txt\n# print(\"{}_{}\".format(nspf, nfam))\n# continue\n# running STAMP align for all SSGs in CATH \n!qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp \"-- -a 'ssg'\" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log\n# running STAMP align for all FFs in CATH \n!qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp \"-- -a 'ff'\" -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log\n# rerun some SCGs/FFs with some problem (as defined in stamp/stamp_summary.txt)\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp \"-- -a 'ssg'\" -f ~/NOBACK/DB/stamp/rerun.txt -l align.log\n# print out some quality metrics for each STAMP SCG/FF alignment\n!qrsh -cwd -V python2.7 stamptools.py -q 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log \n!qrsh -cwd -V python2.7 stamptools.py -q 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log \n# get summary information + variants\n!qrsh -cwd -V python2.7 stamptools.py -c 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log \n!qrsh -cwd -V python2.7 stamptools.py -c 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log\n# splitting the jobs with jobhandler\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp \"-- -q 'ssg'\" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp \"-- -q 'ff'\" -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log\n# splitting the jobs with jobhandler\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 200 -stamp \"-- -c 'ssg'\" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 200 -stamp \"-- -c 'ff'\" -f 
~/NOBACK/DB/superfamilies_ff.txt -l align4.log\n# joining the files together\n!cat ~/NOBACK/DB/stamp_tmp/stats_ssg_*.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_stats2.txt\n!cat ~/NOBACK/DB/stamp_tmp/stats_ff_*.txt > ~/NOBACK/DB/superfamilies_analysis_ff_stats2.txt \n# joining the files together\n!cat ~/NOBACK/DB/stamp_tmp/vars_ssg_*.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_vars.txt\n!cat ~/NOBACK/DB/stamp_tmp/vars_ff_*.txt > ~/NOBACK/DB/superfamilies_analysis_ff_vars.txt \n# extend alignments\n!qsub -cwd -V stamptools.py -g 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log\n!qsub -cwd -V stamptools.py -g 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align4.log\n\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp \"-- -g 'ssg'\" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log\n!qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp \"-- -g 'ff'\" -f ~/NOBACK/DB/superfamilies_ff.txt -l align4.log"},"license":{"kind":"string","value":"non_permissive"},"path":{"kind":"string","value":"/notebooks/6_STAMP_scans_and_alignments.ipynb"},"repo_name":{"kind":"string","value":"biomadeira/ProIntVar"},"chain_length":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":4887,"cells":{"content":{"kind":"string","value":"## M2 ModelWe can train Kingma's original M2 model in an unsupervised fashion.def px_graph(z, y):\n reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0\n # -- p(x)\n with tf.variable_scope('px'):\n zy = tf.concat((z, y), 1, name='zy/concat')\n # h1 = Dense(zy, 512, 'layer1', tf.nn.relu, reuse=reuse)\n # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)\n # px_logit = Dense(h2, 784, 'logit', reuse=reuse)\n h3 = Dense(zy, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse )\n h3 = tf.reshape(h3,[-1, 14, 14, 28])\n h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer4\")\n h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer5\")\n h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = \"layer6\")\n # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer7\")\n px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = \"layer7\", reuse = reuse)\n px_logit = tf.contrib.layers.flatten(px_logit)\n return px_logit\n\ntf.reset_default_graph()\n# print(Placeholder)\nx = Placeholder((None, 784), name = 'x')\n\n# binarize data and create a y \"placeholder\"\nwith tf.name_scope('x_binarized'):\n xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)\nwith tf.name_scope('y_'):\n y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)\n\n# propose distribution over y \nqy_logit, qy = qy_graph(xb)\n\n# for each proposed y, infer z and reconstruct x\nz, zm, zv, px_logit = [[None] * 10 for i in xrange(4)]\nfor i in xrange(10):\n with tf.name_scope('graphs/hot_at{:d}'.format(i)):\n y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))\n z[i], zm[i], zv[i] = qz_graph(xb, y)\n px_logit[i] = px_graph(z[i], y)\n\n# Aggressive name scoping for pretty graph visualization :P\nwith tf.name_scope('loss'):\n with tf.name_scope('neg_entropy'):\n nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy)\n losses = [None] * 10\n for i in xrange(10):\n with tf.name_scope('loss_at{:d}'.format(i)):\n losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], Constant(0), Constant(1))\n with tf.name_scope('final_loss'):\n loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in 
xrange(10)])\nshow_graph(tf.get_default_graph().as_graph_def())\ntrain_step = tf.train.AdamOptimizer().minimize(loss)\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n# sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version\nsess_info = (sess, qy_logit, nent, loss, train_step)\ntrain(None, mnist, sess_info, epochs=2) tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 3.07e-01, 1.37e+02, 3.10e-01, 1.36e+02, 3.93e-01, 1\n tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 2.88e-01, 1.23e+02, 2.85e-01, 1.22e+02, 4.22e-01, 2\n# Modified M2 (Gaussian mixture hidden layer)With some thought, we can modified M2 to implicitly be a latent variable model with a Gaussian mixture stochastic layer. Training is a bit finnicky, so you might have to run it a few times before it works properly.method = 'relu'\n\ndef custom_layer(zy, reuse):\n # Here are 3 choices for what to do with zy\n # I leave this as hyperparameter\n if method == 'identity':\n return zy\n elif method == 'relu':\n return tf.nn.relu(zy)\n elif method == 'layer':\n return Dense(zy, 512, 'layer1', tf.nn.relu, reuse=reuse)\n else:\n raise Exception('Undefined method')\n\ndef px_graph(z, y):\n reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0\n # -- transform z to be a sample from one of the Gaussian mixture components\n with tf.variable_scope('z_transform'):\n zm = Dense(y, 64, 'zm', reuse=reuse)\n zv = Dense(y, 64, 'zv', tf.nn.softplus, reuse=reuse)\n # h1 = Dense(y,128, 'h1', tf.nn.relu, reuse=reuse)\n # h2 = Dense(h1,128, 'h2', tf.nn.relu, reuse=reuse)\n # zm = Dense(h2, 64, 'zm', reuse=reuse)\n # zv = Dense(h2, 64, 'zv', tf.nn.softplus, reuse=reuse)\n # -- p(x)\n with tf.variable_scope('px'):\n with tf.name_scope('layer1'):\n zy = zm + tf.sqrt(zv) * z\n h1 = custom_layer(zy, reuse)\n h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)\n # h3 = tf.nn.dropout(h2, 0.5, name = 'layer3')\n # h4 = Dense(h2, 512, 'layer4', tf.nn.relu, reuse = reuse)\n # h5 = tf.nn.dropout(h4, 0.5, name = 'layer5')\n # # px_logit = Dense(h2, 784, 'logit', reuse=reuse)\n # px_logit = Dense(h5, 784, 'logit', reuse=reuse)\n # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse = reuse)\n h3 = Dense(h2, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse )\n h3 = tf.reshape(h3,[-1, 14, 14, 28])\n h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer4\")\n h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer5\")\n h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = \"layer6\")\n # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer7\")\n px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = \"layer7\", reuse = reuse)\n px_logit = tf.contrib.layers.flatten(px_logit)\n return px_logit\n\ntf.reset_default_graph()\nx = Placeholder((None, 784), name ='x')\n\n# binarize data and create a y \"placeholder\"\nwith tf.name_scope('x_binarized'):\n xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)\nwith tf.name_scope('y_'):\n y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)\n\n# propose distribution over y \nqy_logit, qy = qy_graph(xb)\n\n# for each proposed y, infer z and reconstruct x\nz, zm, zv, px_logit = [[None] * 10 for i in xrange(4)]\nfor i in xrange(10):\n with tf.name_scope('graphs/hot_at{:d}'.format(i)):\n y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))\n z[i], 
zm[i], zv[i] = qz_graph(xb, y)\n px_logit[i] = px_graph(z[i], y)\n\n# Aggressive name scoping for pretty graph visualization :P\nwith tf.name_scope('loss'):\n with tf.name_scope('neg_entropy'):\n nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy)\n losses = [None] * 10\n for i in xrange(10):\n with tf.name_scope('loss_at{:d}'.format(i)):\n losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], Constant(0), Constant(1))\n with tf.name_scope('final_loss'):\n loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in xrange(10)])\nshow_graph(tf.get_default_graph().as_graph_def())\ntrain_step = tf.train.AdamOptimizer().minimize(loss)\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n# sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version\nsess_info = (sess, qy_logit, nent, loss, train_step)\ntrain(None, mnist, sess_info, epochs=2) tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 2.57e-02, 1.40e+02, 2.68e-02, 1.39e+02, 1.91e-01, 1\n tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 2.48e-02, 1.22e+02, 2.39e-02, 1.21e+02, 1.96e-01, 2\n# Explicit Gaussian Mixture VAEWhy be implicit when we can explicitly train a Gaussian Mixture VAE? So here's code for doing that. Unlike the modified M2, GMVAE is very stable. def px_graph(z, y):\n reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0\n # -- p(z)\n with tf.variable_scope('pz'):\n h1 = Dense(y, 128, 'h1', tf.nn.relu, reuse=reuse)\n h2 = Dense(h1, 128, 'h2', tf.nn.relu, reuse=reuse)\n zm = Dense(h2, 64, 'zm', reuse=reuse)\n zv = Dense(h2, 64, 'zv', tf.nn.softplus, reuse=reuse)\n # zv = Dense(y, 64, 'zv', tf.nn.softplus, reuse=reuse)\n # zm = Dense(y, 64, 'zm', reuse=reuse)\n # -- p(x)\n with tf.variable_scope('px'):\n h1 = Dense(z, 512, 'layer1', tf.nn.relu, reuse=reuse)\n # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)\n # h3 = Dense(h2, 512, 'layer3', tf.nn.relu, reuse=reuse)\n # px_logit = Dense(h3, 784, 'logit', reuse=reuse)\n # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse = reuse)\n h3 = Dense(h1, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse )\n h3 = tf.reshape(h3,[-1, 14, 14, 28])\n h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer4\")\n h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer5\")\n h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = \"layer6\")\n # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer7\")\n # h8 = Conv2d_transpose(h7, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = \"layer8\")\n px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = \"layer7\", reuse = reuse)\n px_logit = tf.contrib.layers.flatten(px_logit)\n return zm, zv, px_logit\ntf.reset_default_graph()\nx = Placeholder((None, 784), name = 'x')\n\n# binarize data and create a y \"placeholder\"\nwith tf.name_scope('x_binarized'):\n xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)\nwith tf.name_scope('y_'):\n y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)\n\n# propose distribution over y\nqy_logit, qy = qy_graph(xb)\n\n# for each proposed y, infer z and reconstruct x\nz, zm, zv, zm_prior, zv_prior, px_logit = [[None] * 10 for i in xrange(6)]\nfor i in xrange(10):\n with tf.name_scope('graphs/hot_at{:d}'.format(i)):\n y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))\n z[i], zm[i], zv[i] = qz_graph(xb, 
y)\n zm_prior[i], zv_prior[i], px_logit[i] = px_graph(z[i], y)\n\n# Aggressive name scoping for pretty graph visualization :P\nwith tf.name_scope('loss'):\n with tf.name_scope('neg_entropy'):\n nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy)\n losses = [None] * 10\n for i in xrange(10):\n with tf.name_scope('loss_at{:d}'.format(i)):\n losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i])\n with tf.name_scope('final_loss'):\n loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in xrange(10)])\nshow_graph(tf.get_default_graph().as_graph_def())\ntrain_step = tf.train.AdamOptimizer().minimize(loss)\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n# sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version\nsess_info = (sess, qy_logit, nent, loss, train_step)\ntrain(None, mnist, sess_info, epochs=2) tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 4.93e-02, 1.41e+02, 5.26e-02, 1.40e+02, 1.88e-01, 1\n tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch \n 4.77e-02, 1.22e+02, 4.50e-02, 1.21e+02, 2.03e-01, 2\n# Evaluationimport glob\nimport pandas as pd\nimport seaborn as sns\nimport os.path\n%pylab inline\ndef prune_rows(arr, k):\n delete_rows = []\n for i in xrange(len(arr)):\n if np.isnan(arr[i, k]):\n delete_rows += [i]\n return np.delete(arr, delete_rows, axis=0)[:, :k]\n\ndef plot_from_csv(glob_str, axes, color_idx):\n dfs = [pd.read_csv(f) for f in glob.glob('logs/{:s}.log*'.format(glob_str))]\n df = (pd.concat(dfs, axis=1, keys=range(len(dfs)))\n .swaplevel(0, 1, axis=1) \n .sortlevel(axis=1))\n df = df[:200].apply(pd.to_numeric) \n k = 199\n ax1, ax2, ax3 = axes\n\n sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_ent')].values.T, k), \n ax=ax1, \n condition=glob_str,\n color=sns.color_palette()[color_idx])\n ax1.set_ylim(0,3)\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Conditional Entropy')\n\n sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_loss')].values.T, k), \n ax=ax2, \n condition=glob_str,\n color=sns.color_palette()[color_idx])\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Loss')\n\n sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_acc')].values.T, k), \n ax=ax3, \n condition=glob_str,\n color=sns.color_palette()[color_idx])\n ax3.set_xlabel('Epochs')\n ax3.set_ylabel('Accuracy')\nf, axes = plt.subplots(1,3, figsize=(20, 5))\nplot_from_csv('m2', axes, 0)\nplt.savefig('images/m2.png')\nf, axes = plt.subplots(1,3, figsize=(20, 5))\nplot_from_csv('modified_m2_method=relu', axes, 1)\nplt.savefig('images/modified_m2_method=relu.png')\nf, axes = plt.subplots(1,3, figsize=(20, 5))\nplot_from_csv('gmvae', axes, 2)\nplt.savefig('images/gmvae.png')\nf, axes = plt.subplots(1,3, figsize=(20, 5))\nplot_from_csv('m2', axes, 0)\nplot_from_csv('modified_m2_method=relu', axes, 1)\nplot_from_csv('gmvae', axes, 2)\nplt.savefig('images/combined.png')/Users/huxiaojing/tensorflow/lib/python2.7/site-packages/ipykernel_launcher.py:12: FutureWarning: sortlevel is deprecated, use sort_index(level= ...)\n if sys.path[0] == '':\n"},"license":{"kind":"string","value":"permissive"},"path":{"kind":"string","value":"/experiments.ipynb"},"repo_name":{"kind":"string","value":"SharynHu/vae-clustering-cnn"},"chain_length":{"kind":"number","value":4,"string":"4"}}},{"rowIdx":4888,"cells":{"content":{"kind":"string","value":"- Look through sklearn datasets\n- Find a datasetnot used in example\n- Train a random forest model on your dataset\n- Determine which forest was the most accurate using the .score() 
function\n- Visualize feature importance with a bar plotfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import RandomForestRegressor\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nmodel = RandomForestRegressor(n_estimators=10,random_state=0, n_jobs= 10)\nboston = load_boston()\n\ndf = pd.DataFrame(boston.data, columns=boston.feature_names) # load the dataset as a pandas data frame\n\n# Train\nmodel.fit(df, boston.target)\nprint (\"score: %f\" % model.score(df, boston.target))\n\n\nbestScore={0:0}\n\nfor forest, tree in enumerate(model.estimators_):\n score = tree.score(df, boston.target)\n if score > list(bestScore.values())[0]:\n bestScore={forest:score}\n\nprint(bestScore)\n# Extract single tree\n\n\ntreeSTD = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)\nimportances = model.feature_importances_\n\nindices = np.argsort(importances)[::-1]\ntotalFeatures = len(boston.data[0])\n\n\n# Print the feature ranking\nprint(\"\\nFeature ranking:\")\nfor f in range(totalFeatures):\n print(\"%d. feature %s (%f)\" % (f + 1, boston.feature_names[indices[f]], importances[indices[f]]))\n\n \n\n# Plot the feature importances of the forest\nfig = plt.figure(figsize=[12,6])\nplt.title(\"Feature importances\")\nplt.bar(range(totalFeatures), importances[indices],\n color=\"r\", yerr=treeSTD[indices], align=\"center\")\nplt.xticks(range(totalFeatures), boston.feature_names[indices])\nplt.show()\nscore: 0.973934\n{8: 0.9369247737000006}\n\nFeature ranking:\n1. feature RM (0.524906)\n2. feature LSTAT (0.273742)\n3. feature DIS (0.063590)\n4. feature CRIM (0.034805)\n5. feature NOX (0.028503)\n6. feature TAX (0.017679)\n7. feature B (0.016519)\n8. feature PTRATIO (0.015914)\n9. feature AGE (0.010513)\n10. feature RAD (0.006009)\n11. feature INDUS (0.004543)\n12. feature ZN (0.003065)\n13. feature CHAS (0.000211)\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Eduonix Edegree/4.Complete Guide to Machine Learning using Python/3. Random Forest/3.0 - Chapter Problem/3.0 - My Answer.ipynb"},"repo_name":{"kind":"string","value":"pvdwijdeven/ML_eduonix"},"chain_length":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":4889,"cells":{"content":{"kind":"string","value":"# Computing saliency masks with the PAIRML saliency library in TF1\n\nThis notebook demonstrates a number of saliency mask techniques, augmented with the `SmoothGrad` technique, using the Inception V3 convolutional neural network. The intention of this notebook is to have as few dependencies as possible to show how to compute masks.\n\nThis notebook shows the following techniques, alongside with the `SmoothGrad` augmentation:\n\n* Vanilla Gradients\n ([paper](https://scholar.google.com/scholar?q=Visualizing+higher-layer+features+of+a+deep+network&btnG=&hl=en&as_sdt=0%2C22),\n [paper](https://arxiv.org/abs/1312.6034))\n* Guided Backpropogation ([paper](https://arxiv.org/abs/1412.6806))\n* Integrated Gradients ([paper](https://arxiv.org/abs/1703.01365))\n* XRAI ([paper](https://arxiv.org/abs/1906.02825))\n* Grad-CAM ([paper](https://arxiv.org/abs/1610.02391))\n* Blur IG ([paper](https://arxiv.org/abs/2004.03383))\n* Guided IG ([paper](https://arxiv.org/abs/2106.09788))\n\nThis notebook assumes you have the `saliency` pip package installed. 
To install run (use `pip3` for python 3.x):\n```\npip install saliency[tf1] tensorflow_hub\n```# Boilerplate imports.\nimport tensorflow.compat.v1 as tf\nimport numpy as np\nimport PIL.Image\nfrom matplotlib import pylab as P\n\n# From our repository.\nimport saliency.tf1 as saliency\n\n%matplotlib inlineINFO:tensorflow:Enabling eager execution\nINFO:tensorflow:Enabling v2 tensorshape\nINFO:tensorflow:Enabling resource variables\nINFO:tensorflow:Enabling tensor equality\nINFO:tensorflow:Enabling control flow v2\n### Utility methods# Boilerplate methods.\ndef ShowImage(im, title='', ax=None):\n if ax is None:\n P.figure()\n P.axis('off')\n im = (im * 255).astype(np.uint8)\n P.imshow(im)\n P.title(title)\n\ndef ShowGrayscaleImage(im, title='', ax=None):\n if ax is None:\n P.figure()\n P.axis('off')\n\n P.imshow(im, cmap=P.cm.gray, vmin=0, vmax=1)\n P.title(title)\n\ndef ShowHeatMap(im, title, ax=None):\n if ax is None:\n P.figure()\n P.axis('off')\n P.imshow(im, cmap='inferno')\n P.title(title)\n\ndef LoadImage(file_path):\n im = PIL.Image.open(file_path)\n im = np.asarray(im)\n return im / 255### Loading the Inception model graph\n\nRun the following cell to download the network. This assumes you have the `tensorflow_hub` pip package installed. To install run (use `pip3` for python 3.x):\n```\npip install tensorflow-hub\n```\nAlternatively, the pretrained network can be downloaded [here](https://tfhub.dev/google/imagenet/inception_v3/classification/1).import tensorflow_hub as hub\n\nmodel_path = \"https://tfhub.dev/google/imagenet/inception_v3/classification/3\"\ngraph = tf.Graph()\nsess = tf.Session(graph=graph)\nwith graph.as_default():\n hub.Module(model_path)\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())### Adding a single logit tensor for which we want to compute the maskwith graph.as_default():\n images = graph.get_tensor_by_name('module/hub_input/images:0')\n logits = graph.get_tensor_by_name('module/InceptionV3/Logits/SpatialSqueeze:0')\n # Construct the scalar neuron tensor.\n neuron_selector = tf.placeholder(tf.int32)\n y = logits[:,neuron_selector]\n\n # Construct tensor for predictions.\n prediction = tf.argmax(logits, 1)### Load an image and infer# Load the image\nim = LoadImage('./doberman.png')\n\n# Show the image\nShowImage(im)\n\n# Make a prediction. \nprediction_class = sess.run(prediction, feed_dict = {images: [im]})[0]\n\nprint(\"Prediction class: \" + str(prediction_class)) # Should be a doberman, class idx = 237Prediction class: 237\n### Vanilla Gradient & SmoothGrad# Construct the saliency object. 
This doesn't yet compute the saliency mask, it just sets up the necessary ops.\ngradient_saliency = saliency.GradientSaliency(graph, sess, y, images)\n\n# Compute the vanilla mask and the smoothed mask.\nvanilla_mask_3d = gradient_saliency.GetMask(im, feed_dict = {neuron_selector: prediction_class})\nsmoothgrad_mask_3d = gradient_saliency.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class})\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nvanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_mask_3d)\nsmoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Gradient', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(smoothgrad_mask_grayscale, title='SmoothGrad', ax=P.subplot(ROWS, COLS, 2))### Guided Backprop & SmoothGrad# Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops.\n# NOTE: GuidedBackprop creates a copy of the given graph to override the gradient.\n# Don't construct too many of these!\nguided_backprop = saliency.GuidedBackprop(graph, sess, y, images)\n\n# Compute the vanilla mask and the smoothed mask.\nvanilla_guided_backprop_mask_3d = guided_backprop.GetMask(\n im, feed_dict = {neuron_selector: prediction_class})\nsmoothgrad_guided_backprop_mask_3d = guided_backprop.GetSmoothedMask(\n im, feed_dict = {neuron_selector: prediction_class})\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nvanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_guided_backprop_mask_3d)\nsmoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_guided_backprop_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Guided Backprop', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(smoothgrad_mask_grayscale, title='SmoothGrad Guided Backprop', ax=P.subplot(ROWS, COLS, 2))INFO:tensorflow:Restoring parameters from /tmp/guided_backprop_ckpt\n### Integrated Gradients & SmoothGrad# Construct the saliency object. 
This doesn't yet compute the saliency mask, it just sets up the necessary ops.\nintegrated_gradients = saliency.IntegratedGradients(graph, sess, y, images)\n\n# Baseline is a black image.\nbaseline = np.zeros(im.shape)\n\n# Compute the vanilla mask and the smoothed mask.\nvanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask(\n im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20)\n# Smoothed mask for integrated gradients will take a while since we are doing nsamples * nsamples computations.\nsmoothgrad_integrated_gradients_mask_3d = integrated_gradients.GetSmoothedMask(\n im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20)\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nvanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d)\nsmoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_integrated_gradients_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(smoothgrad_mask_grayscale, title='Smoothgrad Integrated Gradients', ax=P.subplot(ROWS, COLS, 2))### XRAI Full and Fast# Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops.\nxrai_object = saliency.XRAI(graph, sess, y, images)\n\n# Compute XRAI attributions with default parameters\nxrai_attributions = xrai_object.GetMask(im, feed_dict={neuron_selector: prediction_class}, batch_size=20)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 3\nUPSCALE_FACTOR = 20\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Show original image\nShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1))\n\n# Show XRAI heatmap attributions\nShowHeatMap(xrai_attributions, title='XRAI Heatmap', ax=P.subplot(ROWS, COLS, 2))\n\n# Show most salient 30% of the image\nmask = xrai_attributions >= np.percentile(xrai_attributions, 70)\nim_mask = np.array(im)\nim_mask[~mask] = 0\nShowImage(im_mask, title='Top 30%', ax=P.subplot(ROWS, COLS, 3))\n# Create XRAIParameters and set the algorithm to fast mode which will produce an approximate result.\nxrai_params = saliency.XRAIParameters()\nxrai_params.algorithm = 'fast'\n\n# Compute XRAI attributions with fast algorithm\nxrai_attributions_fast = xrai_object.GetMask(im, feed_dict={neuron_selector: prediction_class}, extra_parameters=xrai_params, batch_size=20)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 3\nUPSCALE_FACTOR = 20\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Show original image\nShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1))\n\n# Show XRAI heatmap attributions\nShowHeatMap(xrai_attributions_fast, title='XRAI Heatmap', ax=P.subplot(ROWS, COLS, 2))\n\n# Show most salient 30% of the image\nmask = xrai_attributions_fast >= np.percentile(xrai_attributions_fast, 70)\nim_mask = np.array(im)\nim_mask[~mask] = 0\nShowImage(im_mask, 'Top 30%', ax=P.subplot(ROWS, COLS, 3))### Grad-CAM# Compare Grad-CAM and Smoothgrad with Grad-CAM. 
Note: This will take a long time to run.\n\n# GradCAM uses the final convolution layer, in this case \"Mixed_7c\"\nwith graph.as_default():\n conv_tensor = graph.get_tensor_by_name('module/InceptionV3/InceptionV3/Mixed_7c/concat:0')\n\n# Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops.\ngrad_cam = saliency.GradCam(graph, sess, y, images, conv_tensor)\n\n# Compute the Grad-CAM mask and the smoothed mask.\ngrad_cam_mask_3d = grad_cam.GetMask(im, feed_dict = {neuron_selector: prediction_class})\nsmoothgrad_grad_cam_mask_3d = grad_cam.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class})\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\ngrad_cam_mask_grayscale = saliency.VisualizeImageGrayscale(grad_cam_mask_3d)\nsmoothgrad_grad_cam_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_grad_cam_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(grad_cam_mask_grayscale, title='Grad-CAM', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(smoothgrad_grad_cam_mask_grayscale, title='SmoothGrad Grad-CAM', ax=P.subplot(ROWS, COLS, 2))### Guided IG# Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops.\nintegrated_gradients = saliency.IntegratedGradients(graph, sess, y, images)\nguided_ig = saliency.GuidedIG(graph, sess, y, images)\n\n# Baseline is a black image for vanilla integrated gradients.\nbaseline = np.zeros(im.shape)\n\n# Compute the vanilla mask and the Guided IG mask.\nvanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask(\n im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20)\nguided_ig_mask_3d = guided_ig.GetMask(\n im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, max_dist=0.2, fraction=0.5)\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nvanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d)\nguided_ig_mask_grayscale = saliency.VisualizeImageGrayscale(guided_ig_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 3\nUPSCALE_FACTOR = 20\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 2))\nShowGrayscaleImage(guided_ig_mask_grayscale, title='Guided Integrated Gradients', ax=P.subplot(ROWS, COLS, 3))### Blur IG# Construct the saliency object. 
This doesn't yet compute the saliency mask, it just sets up the necessary ops.\nintegrated_gradients = saliency.IntegratedGradients(graph, sess, y, images)\nblur_ig = saliency.BlurIG(graph, sess, y, images)\n\n# Baseline is a black image for vanilla integrated gradients.\nbaseline = np.zeros(im.shape)\n\n# Compute the vanilla mask and the Blur IG mask.\nvanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask(\n im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20)\nblur_ig_mask_3d = blur_ig.GetMask(\n im, feed_dict = {neuron_selector: prediction_class}, batch_size=20)\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nvanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d)\nblur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(blur_ig_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(blur_ig_mask_grayscale, title='Blur Integrated Gradients', ax=P.subplot(ROWS, COLS, 2))\n# Compare BlurIG and Smoothgrad with BlurIG. Note: This will take a long time to run.\n# Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops.\nblur_ig = saliency.BlurIG(graph, sess, y, images)\n\n# Compute the Blur IG mask and Smoothgrad+BlurIG mask.\nblur_ig_mask_3d = blur_ig.GetMask(im, feed_dict = {neuron_selector: prediction_class}, batch_size=20)\n# Smoothed mask for BlurIG will take a while since we are doing nsamples * nsamples computations.\nsmooth_blur_ig_mask_3d = blur_ig.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class}, batch_size=20)\n\n# Call the visualization methods to convert the 3D tensors to 2D grayscale.\nblur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(blur_ig_mask_3d)\nsmooth_blur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(smooth_blur_ig_mask_3d)\n\n# Set up matplot lib figures.\nROWS = 1\nCOLS = 2\nUPSCALE_FACTOR = 10\nP.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR))\n\n# Render the saliency masks.\nShowGrayscaleImage(blur_ig_mask_grayscale, title='Blur Integrated Gradients', ax=P.subplot(ROWS, COLS, 1))\nShowGrayscaleImage(smooth_blur_ig_mask_grayscale, title='Smoothgrad Blur IG', ax=P.subplot(ROWS, COLS, 2))\n"},"license":{"kind":"string","value":"permissive"},"path":{"kind":"string","value":"/Examples_tf1.ipynb"},"repo_name":{"kind":"string","value":"Pandinosaurus/saliency"},"chain_length":{"kind":"number","value":12,"string":"12"}}},{"rowIdx":4890,"cells":{"content":{"kind":"string","value":"# 可**制御**性判別\n- 可制御性行列のランクが最大なら可制御Uc = matlab.ctrb(A, B)\nRc = np.linalg.matrix_rank(Uc)\nprint(\"ランク:\",Rc,\"サイズ:\",min(Uc.shape))ランク: 2 サイズ: 2\n# 可**観測**性判別\n- 可観測性行列のランクが最大なら可制御Uo = matlab.obsv(A, C)\nRo = np.linalg.matrix_rank(Uo)\nprint(\"ランク:\",Ro,\"サイズ:\",min(Uo.shape))ランク: 2 サイズ: 2\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/4章5章6章らへん.ipynb"},"repo_name":{"kind":"string","value":"gomi-kuzu/wakariyasui_modern_control"},"chain_length":{"kind":"number","value":2,"string":"2"}}},{"rowIdx":4891,"cells":{"content":{"kind":"string","value":"## Lending Club loan analysis\nThis notebook looks at data from the Lending Club database. 
The database and csv file containing all loan info and data dictionary were obtained from a Kaggle link (https://www.kaggle.com/wendykan/lending-club-loan-data). This database contains information relating to loans given from 2008 to 2015.\n* **Section 1:Exploratory analyses** is done to look at general features such as *average loan*, *interest rate* and *intallments* over time, *loan purposes* and *amounts* and *status* of loans. This is done in **SQL** and output files of the SQL query results are used here.\n* **Section 2: Only good and bad loans will be examined** (based on information for loan status from above. Also done in **SQL** and output files of the SQL query results are used here.\nGood and bad loans will be *stratified* by loan grade, US states the loans come from, loan amount, employment length, income, home ownership and installment. This is to examine if there are trends between type of loan and these features.\nFor good/bad loans by states, the maps appear when viewed on NBViewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb) even if it may not appear on github.\n* **Section 3: Features selection:** look through all columns and determine which features will need to be kept for the loan prediction model. Those that contain information from the future/ not relevant will be removed.\n* **Section 4: Data cleaning:** Convert catgorical variables to integers. Remove rows with missing values. \n* **Section 5:** **Machine Learning models: 2 models** are used to make predictions: **1) K-Nearest Neighbors model** **2) Logistic Regression model.**import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport sklearn\n\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nimport folium as folium\nimport os\nimport seaborn as sns### Section 1: Exploratory analysesLook at total records in the database.total= pd.read_csv(\"output/tot.csv\")\ntot = total[\"total records\"][0]\nprint(\"The total number of records in the database is: {}.\".format(tot))The total number of records in the database is: 887383.\nLook at the trends in average loan amount and average interest from 2008 to 2015.\nFrom 2008-09 and around 2012-2013, even though average interest rates were increasing, average loan amounts were also high/increasing. 
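(Side note: the average-loan and average-interest figures above are read from SQL query exports; a minimal pandas equivalent is sketched below purely for illustration. It assumes the raw Kaggle file data/loan.csv with its standard issue_d, loan_amnt and int_rate columns, and is not the actual SQL behind avg-loan.csv / avg-interest.csv.)
# Hedged sketch: yearly averages computed directly from the raw file with pandas.
# Assumes issue_d is formatted like "Dec-2011" and int_rate is already numeric.
import pandas as pd
raw = pd.read_csv("data/loan.csv", low_memory=False, usecols=["issue_d", "loan_amnt", "int_rate"])
raw["loan_year"] = pd.to_datetime(raw["issue_d"], format="%b-%Y").dt.year
yearly = raw.groupby("loan_year")[["loan_amnt", "int_rate"]].mean().reset_index()
yearly.columns = ["loan_year", "average_loan", "interest_rate"]
yearly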
\n# average loan.\nloan_month = pd.read_csv(\"output/avg-loan.csv\")\nxmonth = loan_month[\"loan_year\"]\nyavg_loan = loan_month[\"average_loan\"]\nmonth_cat = np.arange(len(xmonth))\n\n# average interest.\nint_month = pd.read_csv(\"output/avg-interest.csv\")\nyavg_int = int_month[\"interest_rate\"]\nmonth_cat = np.arange(len(xmonth))\n\n# plot figure\nfig = plt.figure(figsize=(15, 5))\nax1 = fig.add_subplot(111)\n\n# figure for avg loan.\nax1.set_xlabel(\"Year\",fontsize=15)\nax1.set_ylabel(\"Average loan amount ($)\",fontsize=15)\nax1.set_title(\"Average annual loan amount (from 2008 - 2015)\",fontsize=15)\nax1.plot(xmonth, yavg_loan, alpha=0.85, label=\"Average loan\")\n\n# figure for avg interest.\nax2 = ax1.twinx()\nax2.set_ylabel(\"Average interest rate (%)\",fontsize=15)\nax2.plot(xmonth, yavg_int, alpha=0.8, \n label=\"Avg interest rate (%)\",\n color=\"red\")\n\nax1.legend(loc=\"upper left\")\nax2.legend(loc=\"upper right\")\n\nfig.tight_layout()\nplt.show()Look at trends in average loan installment from 2008 to 2015. This has increased from around $275/month is 2008 to around $425 in 2015.# avg installment\ninstall = pd.read_csv(\"output/avg-installment.csv\")\navg_instal = pd.DataFrame({\"loan_year\": [2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015], \n \"installment\": [279.0, 330.0, 303.0, 338.0, 422.0, 452.0, 422.0, 422.0]})\navg_instal.plot(kind=\"line\", x=\"loan_year\", y=\"installment\", color=\"b\", alpha=0.3, figsize=(15,5), fontsize=15)\n\nplt.xlabel('Year', fontsize=15)\nplt.ylabel('Average instalment ($)', fontsize=15)\nplt.title('Average loan installment (from 2008 - 2015)', fontsize=15)\nplt.show()What are the loans used for? We can see that the bulk of it is used for debt consolidation, followed by credit card payments.# loan purpose\npurpose = pd.read_csv(\"output/purpose.csv\")\nx_purpose = purpose[\"purpose\"]\ny_perct = purpose[\"Percentage\"]\npurpose_cat = np.arange(len(x_purpose))\nplt.bar(x_purpose, y_perct)\nplt.xticks(purpose_cat, x_purpose, rotation=90)\nplt.ylabel(\"% of loans\")\nplt.title(\"Purpose of loan\", fontsize=15)\nplt.show()Categorize loan amounts into 4 categorize to see what are the most common loan amounts give. \nMost loans re between $5000- $15000.# loan categories\nlf = pd.read_csv(\"output/loan-cat.csv\")\nlf = pd.DataFrame({\"loan_amount\": [1, 2, 3, 4], \"total\" : [75513, 460027, 248174, 103669]})\nlf[\"loan_amount\"] = lf[\"loan_amount\"].replace({1: \"under $5000\", 2: \"$5000 - $15000\", 3: \"$15000 - $25000\", 4: \"$25000 and above\"})\nlf.plot(kind=\"bar\", x=\"loan_amount\", y=\"total\", color=\"g\", alpha=0.7)\nplt.xlabel('Loan category', fontsize=15)\nplt.ylabel('No of loans')\nplt.title('Loan categories')\nplt.show()What is the distribution of loan status of all loans in the database? Most are current loans (> 600,00).# loan distribution\nloan_type = pd.read_csv(\"output/loan-status-overall.csv\")\nloan_type### Section 2: Exploratory analyses stratified by good/bad loans.\nFor the rest of the analyses in this notebook, loans will be categorized as follows:\n- Good loans (this will be made up of fully paid loans).\n- Bad loans (will consist of charged off loans. 
\n- Remaining will not be taken into account as they are either current or there isn't enought information to determine status.\n- To get a better sense of what determines if a loan will be paid off or not, we will look at the good/bad loans only.# good/bad loan and numbers\nloan_type = pd.read_csv(\"output/loan-status.csv\")\nprint(\"No of bad loans and good loans, respectively:\")\nloan_typeNo of bad loans and good loans, respectively:\nTotal number of good & bad loans, from the above and plotted as a pie chart in %. We can see that >80% of loans are good loans.loan_status_tot = loan_type[\"number\"].sum()\nprint(\"The total number of good & bad loans is: {}.\".format(loan_status_tot))\n\n# % good/bad loans.\nvalues = [45248, 207724]\ncolors = [\"r\", \"b\"]\nlabels = [\"Charged Off\", \"Fully Paid\"]\nplt.pie(values, colors=colors, labels=labels, autopct=\"%0.1f%%\")\nplt.title(\"Percentage of good and bad loans\")\nplt.show()The total number of good & bad loans is: 252972.\nDollar value of bad and good loans# total loan value for each type.\nloan_val = pd.read_csv(\"output/loans-total.csv\")\nloan_val.plot(kind=\"bar\", x=\"loan_status\", y=\"dollar_total\", color=\"b\", alpha=0.7)\n\nplt.xlabel('Loan status', fontsize=15)\nplt.ylabel('Total loan amount ($)')\nplt.title('Total loan value')\nplt.show()Good and bad loans by loan grade. Most good loans range from grade A to C. Most bad loans range from grade B to D.# loan type by loan grade.\nlg = pd.read_csv(\"output/loans-grade.csv\")\n# loan_grade\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(8, 4))\n\n# set height of bar\nxgrade = lg[\"grade\"].drop_duplicates()\ny_bad = lg[lg[\"loan_status\"]== \"Charged Off\"][\"loans number\"]\ny_good = lg[lg[\"loan_status\"] == \"Fully Paid\"][\"loans number\"]\n\n# Set position of bar on X axis\nr1 = np.arange(len(xgrade))\nr2 = [x + barWidth for x in r1]\nr3 = [x + barWidth for x in r2]\n \n# Make the plot\nplt.bar(r1, y_bad, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, y_good, color=\"g\", alpha=0.7, width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Loan grades\", fontsize=15)\nplt.xticks([r + barWidth for r in range(len(xgrade))], [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"])\n \n# Create legend & Show graphic\nplt.legend()\nfig.tight_layout()\nplt.show()From above, we see that the majority of good/bad loans fall into grades A to D. Therefore, we will look at grade A to D to see what the main purposes of taking loans are. Again, the main reason across all 4 grades is for debt consolidation, followed by credit card payment.# loan purpose, by grade.\n# Setting up a figure to accomodate 4 grades given in the dataset. 
\nfig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 10))\nfig.suptitle(\"Fig 1- Purpose of loans, by loan grade\", fontweight=\"bold\", size=18)\n\n# Grade A loans, by purpose.\ngp_a = pd.read_csv(\"output/grade-a-purpose.csv\")\ngp_a.plot(kind=\"bar\", ax=axes[0,0], x=\"purpose\", y=\"count\")\naxes[0,0].set_title(\"Grade A loans\", fontsize=15)\naxes[0,0].set_xlabel(\"Loan purpose\")\naxes[0,0].set_ylabel(\"No of loans\")\n\n# Grade B loans, by purpose.\ngp_b = pd.read_csv(\"output/grade-b-purpose.csv\")\ngp_b.plot(kind=\"bar\", ax=axes[0,1], x=\"purpose\", y=\"count\")\naxes[0,1].set_title(\"Grade B loans\", fontsize=15)\naxes[0,1].set_xlabel(\"Loan purpose\")\naxes[0,1].set_ylabel(\"count\")\n\n# Grade C loans, by purpose.\ngp_c = pd.read_csv(\"output/grade-c-purpose.csv\")\ngp_c.plot(kind=\"bar\", ax=axes[1,0], x=\"purpose\", y=\"count\")\naxes[1,0].set_title(\"Grade C loans\", fontsize=15)\naxes[1,0].set_xlabel(\"Loan purpose\")\naxes[1,0].set_ylabel(\"count\")\n\n# Grade D loans, by purpose.\ngp_d = pd.read_csv(\"output/grade-d-purpose.csv\")\ngp_d.plot(kind=\"bar\", ax=axes[1,1], x=\"purpose\", y=\"count\")\naxes[1,1].set_title(\"Grade D loans\", fontsize=15)\naxes[1,1].set_xlabel(\"Loan purpose\")\naxes[1,1].set_ylabel(\"count\")\n\nfig.tight_layout(pad=7, w_pad=12, h_pad=3)\nfig.show()/usr/local/lib/python3.7/site-packages/matplotlib/figure.py:445: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n % get_backend())\nNext, we'll look at the distribution of good loans across US states. The map describes the number of loans. We can see from the figure below that the highest number of good loans come from California. This is followed by Texas, Florida and New York. These are also the 4 most populous states in the US. # map loads when viewed with nbviewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb)\n# good loans by state\ngood_loans_state = pd.read_csv(\"output/good-loans-state.csv\")\n\n# Vol of loans by state.\n# load map shape (US states)\nstate_geo = os.path.join(\"data\", \"us-states.json\")\n\n# load loan data for each state\ndf = os.path.join(\"output\", \"good-loans-state.csv\")\ngood_loans = pd.read_csv(df)\n\n# initialize map\nm = folium.Map(location=[37, -102], zoom_start=4)\n\n# set parameters for map, add color.\nfolium.Choropleth(\n geo_data=state_geo,\n name=\"choropleth\",\n data=good_loans,\n columns=[\"addr_state\", \"loans_vol\"],\n key_on=\"feature.id\",\n fill_color=\"BuGn\",\n fill_opacity=0.8,\n line_opacity=0.3,\n legend_name=\"No of good loans\"\n).add_to(m)\nfolium.LayerControl().add_to(m)\nm\n\n# # Save to html\n# m.save(\"good_loans_map.html\")Let's look at the distribution of bad loans across US states. The map describes the number of loans. We can see from the figure below that the highest number of bad loans come from California. Next is New York state. 
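(Side note: good-loans-state.csv is also a SQL export; a rough pandas equivalent is sketched below for illustration only. The addr_state and loan_status column names are the ones used later in Section 3, and loans_vol matches the column the folium call below expects.)
# Hedged sketch: per-state counts of fully paid loans, i.e. the loans_vol column.
import pandas as pd
loans = pd.read_csv("data/loan.csv", low_memory=False, usecols=["addr_state", "loan_status"])
good_by_state = (loans[loans["loan_status"] == "Fully Paid"]
                 .groupby("addr_state")
                 .size()
                 .reset_index(name="loans_vol"))
good_by_state.sort_values("loans_vol", ascending=False).head()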
This is followed by Texas and Florida.# map loads when viewed with nbviewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb)\n# bad loans by state\nbad_loans_state = pd.read_csv(\"output/bad-loans-state.csv\")\n\n# Vol of loans by state.\n# load map shape (US states)\nstate_geo = os.path.join(\"data\", \"us-states.json\")\n\n# load loan data for each state\ndl = os.path.join(\"output\", \"bad-loans-state.csv\")\nbad_loans = pd.read_csv(dl)\n\n# initialize map\nm = folium.Map(location=[37, -102], zoom_start=4)\n\n# set parameters for map, add color.\nfolium.Choropleth(\n geo_data=state_geo,\n name='choropleth',\n data=bad_loans,\n columns=['addr_state', 'loans_vol'],\n key_on='feature.id',\n fill_color='RdPu',\n fill_opacity=0.8,\n line_opacity=0.3,\n legend_name='No of bad loans'\n).add_to(m)\nfolium.LayerControl().add_to(m)\nm\n\n# # Save to html\n# m.save(\"bad_loans_map.html\")Bulk of good & bad loans both come from 5000 to 25000 dollar category.# loan amount by loan status\nls = pd.read_csv(\"output/loan-status-amount.csv\")\nls = ls.reindex(index = [3, 2, 0, 1, 7, 6, 4, 5])\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(8, 4))\n\n# set height of bar\nx_grade = ls[\"loans\"].drop_duplicates()\ny_bad = ls[ls[\"loan_status\"] == \"Charged Off\"][\"total\"]\ny_good = ls[ls[\"loan_status\"] == \"Fully Paid\"][\"total\"]\n\n# Set position of bar on X axis\nr1 = np.arange(len(x_grade))\nr2 = [x + barWidth for x in r1]\nr3 = [x + barWidth for x in r2]\n \n# Make the plot\nplt.bar(r1, y_bad, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, y_good, color=\"g\", alpha=0.7, width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Loan numbers, by loan amount\", fontweight=\"bold\")\nplt.xticks([r + barWidth for r in range(len(x_grade))], [\"<$5000\", \"$5000-25000\", \"$25000-30000\", \"$30000-35000\"])\n \n# Create legend & Show graphic\nplt.legend()\nfig.tight_layout()\nplt.show()Looking at the employment lengths of those with good loans and bad loans, we see that the trends are very similar, with the majority in > 10 years of employment across both loan categories.# employment length by loan type\nel = pd.read_csv(\"output/employment-loan.csv\")\nel = el.reindex(index = [10, 0, 2, 3, 4, 5, 6, 7, 8, 9, 1, 21, 11, 13, 14, 15, 16, 17, 18, 19, 20, 12])\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(8, 4))\n\n# set height of bar\nx_length = el[\"emp_length\"].drop_duplicates()\ny_bad = el[el[\"loan_status\"] == \"Charged Off\"][\"number\"]\ny_good = el[el[\"loan_status\"] == \"Fully Paid\"][\"number\"]\n\n# Set position of bar on X axis\nr1 = np.arange(len(x_length))\nr2 = [x + barWidth for x in r1]\nr3 = [x + barWidth for x in r2]\n \n# Make the plot\nplt.bar(r1, y_bad, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, y_good, color=\"g\", alpha=0.7,width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Length of employment (in years)\", fontweight=\"bold\")\nplt.xticks([r + barWidth for r in range(len(x_length))], [\"< 1\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10+\"])\n \n# Create legend & show graph\nplt.legend()\nfig.tight_layout()\nplt.show()When we look at income levels by loan time, the patterns are also similar, with the most loans in both loan types given to those with an annual income between 50,000 and 100,000 dollars.# income by 
loan type\nli = pd.read_csv(\"output/income-loan.csv\")\nli = li.reindex(index = [2, 0, 1, 3, 6, 4, 5, 7])\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(8,4))\n\n# set height of bar\nx_length = li[\"income\"].drop_duplicates()\ny_bad = li[li[\"loan_status\"] == \"Charged Off\"][\"total\"]\ny_good = li[li[\"loan_status\"] == \"Fully Paid\"][\"total\"]\n\n# set position of bar on X asia\nr1 = np.arange(len(x_length))\nr2 = [x + barWidth for x in r1]\nr3 = [x + barWidth for x in r2]\n \n# Make the plot\nplt.bar(r1, y_bad, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, y_good, color=\"g\", alpha=0.7, width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Income by loan status\", fontweight=\"bold\")\nplt.xticks([r + barWidth for r in range(len(x_length))], [\"< $25000\", \"25000− 50000\", \"50000− 100000\", \"over $100000\"])\n \n# Create legend & Show graphic\nplt.legend()\nfig.tight_layout()\nplt.show()Looking at home ownership, we see that the majority of those with good loans have a mortgage on their homes. The majority of those with bad loans are tied between those with mortgages and renters.# home ownership by loan type\nlh = pd.read_csv(\"output/home-loan.csv\")\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(8, 4))\n\n# set height of bar\nx_length = lh[\"home_ownership\"].drop_duplicates()\ny_bad = lh[lh[\"loan_status\"] == \"Charged Off\"][\"total\"]\ny_good = lh[lh[\"loan_status\"] == \"Fully Paid\"][\"total\"]\n\n# Set position of bar on X axis\nr1 = np.arange(len(x_length))\nr2 = [x + barWidth for x in r1]\nr3 = [x + barWidth for x in r2]\n \n# Make the plot\nplt.bar(r1, y_bad, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, y_good, color=\"g\", alpha=0.7, width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Home ownership by loan status\", fontweight=\"bold\")\nplt.xticks([r + barWidth for r in range(len(x_length))], [\"MORTGAGE\", \"OWN\", \"RENT\"])\n \n# Create legend & Show graphic\nplt.legend()\nfig.tight_layout()\nplt.show()The trends between installment amounts and loan type are very similar across both categories. Most installments are between $250-$500 for each category.# installment by loan type\ninst = pd.read_csv(\"output/loan-installment.csv\")\ninst = inst.reindex( index = [3, 0, 1, 2, 4, 8, 5, 6, 7, 9])\ninst\n\n# set width of bar\nbarWidth = 0.3\nfig = plt.figure(figsize=(6, 4))\n\n# set height of bar\nbars1 = [10155, 19631, 10163, 3896, 1403]\nbars2 = [57603, 88509, 40555, 15140, 5917]\n\n# set position of bar on X asia\nr1 = np.arange(len(bars1))\nr2 = [x + barWidth for x in r1]\n \n# Make the plot\nplt.bar(r1, bars1, color=\"r\", alpha=0.7, width=barWidth, label=\"Bad loans\")\nplt.bar(r2, bars2, color=\"g\", alpha=0.7, width=barWidth, label=\"Good loans\")\n \n# Add xticks on the middle of the group bars\nplt.xlabel(\"Installment by loan status ($)\", fontsize=15)\nplt.xticks([r + barWidth for r in range(len(bars1))], [\"< 250\", \"250− 500\", \"500− 750\", \"750- 1000\", \"over 1000\"], rotation=90)\n \n# Create legend & Show graphic\nplt.legend()\nplt.show()### Section 3: Features selection\nIn this section, we look at all columns in the dataset to determine if the features will be relevant/useful to predict loan status outcome. 
Those that are not useful or contain information from the future (after loans have been assigned) will be removed from the dataset.df = pd.read_csv(\"data/loan.csv\", low_memory=False)\nhalf_count = len(df) / 2\n\n# Drop columns with > 50% missing values.\ndf = df.dropna(thresh=half_count, axis=1)\n\n# Drop the following column which which does not provide useful \ndf = df.drop([\"url\"], axis=1)\n\n# Now, look at first 5 rows of dataset.\ndf.head()\n# look at shape of data. There are 887,379 rows and 52 columns in the current dataset.\ndf.shape\n# These are all the column names. \n# The LCDataDictionary file has been provided in the data folder and gives a description of each column.\ndf.columnsEach column needs to be examined to determine if it is useful to be kept in the dataset. To do this, the columns are divided into groups. For each group, every column will be compared to column dictionary to see what it represents. If they contain information that is not useful or obtained only after loans are assigned, they are removed from the dataset as such features will not contribute to a loan outcome prediction model. # First 20 columns.\ndf.iloc[:, :20].head()From the above, the following columns are removed are the information they provide is not useful/redundant:\nid, member_id, sub_grade(grade info has been kept),int_rate (grade info has accounted for this), emp_title, issue_d and title (purpose is kept).\nThese columns are also removed as they contain information from after loans are given: funded_amt & funded_amt_inv# remove the following columns.\ndrop_cols1 = ['id','member_id','funded_amnt','funded_amnt_inv',\n 'int_rate','sub_grade','emp_title','issue_d']\ndf = df.drop(drop_cols1,axis=1)After the above columns are dropped, we look at the remaining columns. \nLooking at the table below, zip_code column is dropped as the information given is not useful (we have state names). 
All other columns below are dropped as they contain future info that is not valuable for a loan outcome prediction model.# next lot of columns\ndf.iloc[50:60, 19:38].head()\n# remove the following columns.\ndrop_cols2 = [ 'zip_code','out_prncp','out_prncp_inv',\n 'total_pymnt','total_pymnt_inv', 'delinq_2yrs', 'initial_list_status']\ndf = df.drop(drop_cols2, axis=1)\n\ndrop_cols3 = ['total_rec_prncp','total_rec_int', 'total_rec_late_fee',\n 'recoveries', 'collection_recovery_fee', 'last_pymnt_d',\n 'last_pymnt_amnt']\ndf = df.drop(drop_cols3, axis=1)Since dataset column numbers changed between the first drop of columns and the second, take a look at the remaining columns to see if there are others that need to be removed.df.columnsLooking at the above columns remaining, we remove the following columns that future information/ info that is not useful.# additional columns to drop\ndrop_cols4 = ['title', 'inq_last_6mths','next_pymnt_d', 'collections_12_mths_ex_med', 'policy_code', 'application_type',\n 'acc_now_delinq', 'tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim']\ndf = df.drop(drop_cols4, axis=1)\ndf.head()From the above dataframe, let's look at the unique instances of the payment plan feature# pymnt_plan unique\npyment = df[\"pymnt_plan\"].unique().tolist()\n\nprint(\"No of instances of y in pyment_plan: {}\".format((df[\"pymnt_plan\"]== \"y\").sum()))\nprint(\"No of instances of n in pyment_plan: {}\".format((df[\"pymnt_plan\"]== \"n\").sum()))No of instances of y in pyment_plan: 10\nNo of instances of n in pyment_plan: 887369\nThere are too few instances of y compared to n to make meaningful inferences, in pymnt_plan columns. Therefore, it can be dropped.df = df.drop(\"pymnt_plan\", axis=1)Take another look at the columns to make sure all are useful/do not contain future info.df.columns#### Target variable: Loan status\nThe aim of the machine learning models are to predict good/bad loans. Therefore, loan status is the outcome variable.\nFrom the exploratory analysis section above, we saw there were several categories of loans. For this model, we will only consider the \"Fully Paid\" (good) loans and \"Charged Off\" (bad) loans. 
We filter the dataset to only included these rows,df = df[(df[\"loan_status\"] == \"Fully Paid\") | (df[\"loan_status\"] == \"Charged Off\")]\ndf[\"loan_status\"].unique().tolist()From the features selected from the dataset, look at general statistics of numerical columns.df.describe()\n# save data.\ndf.to_csv(\"data/updated_loan_data.csv\", index=False) ### Section 4: Data Cleaning\n Start by loading the dataset with relevant columns as saved above and take a look at the first 5 rows.updated_loans = pd.read_csv(\"data/updated_loan_data.csv\")\nprint(updated_loans.shape)\nupdated_loans.head()\n# Look at all the data types in the dataset.\nupdated_loans.dtypes\n# Look at all missing values in the dataset.\nnull_values = updated_loans.isnull().sum()\nnull_values\n# Drop rows with missing values\nupdated_loans = updated_loans.dropna()\nupdated_loans.isnull().sum()#### Categorical variables\nTake a look at any one row in the dataset to see how object variables are formatted.obj_col = updated_loans.select_dtypes(include=[\"object\"])\nprint(obj_col.iloc[5])term 36 months\ngrade E\nemp_length 9 years\nhome_ownership RENT\nverification_status Source Verified\nloan_status Fully Paid\npurpose car\naddr_state CA\nearliest_cr_line Jan-2007\nlast_credit_pull_d Dec-2014\nName: 5, dtype: object\nLooking at this row, drop date value columns as these are not useful for the model. The columns are earliest_cr_line & last_credit_pull_ddrop_cols5 = [\"earliest_cr_line\", \"last_credit_pull_d\"]\nupdated_loans = updated_loans.drop(drop_cols5, axis=1)In order to convert categorical variables into integers, let's first look at the unique instances for each of the categorical variables left.term_unique = updated_loans[\"term\"].unique()\ngrade_unique = updated_loans[\"grade\"].unique()\nemp_length_unique = updated_loans[\"emp_length\"].unique()\nhome_own_unique = updated_loans[\"home_ownership\"].unique()\nver_status_unique = updated_loans[\"verification_status\"].unique()\nloan_unique = updated_loans[\"loan_status\"].unique()\npurpose_unique = updated_loans[\"purpose\"].unique()\nadd_state_unique = updated_loans[\"addr_state\"].unique()\n\nprint(\"The unique instances of loan term are: {}\".format(term_unique))\nprint(\"The unique instances of loan grades are: {}\".format(grade_unique))\nprint(\"The unique instances of employment length are: {}\".format(emp_length_unique))\nprint(\"The unique instances of home ownership are: {}\".format(home_own_unique))\nprint(\"The unique instances of verification status are: {}\".format(ver_status_unique))\nprint(\"The unique instances of loan status are: {}\".format(loan_unique))\nprint(\"The unique instances of loan purpose are: {}\".format(purpose_unique))\nprint(\"The unique instances of address state are: {}\".format(add_state_unique))The unique instances of loan term are: [' 36 months' ' 60 months']\nThe unique instances of loan grades are: ['B' 'C' 'A' 'E' 'F' 'D' 'G']\nThe unique instances of employment length are: ['10+ years' '< 1 year' '3 years' '9 years' '4 years' '5 years' '1 year'\n '6 years' '2 years' '7 years' '8 years']\nThe unique instances of home ownership are: ['RENT' 'OWN' 'MORTGAGE' 'OTHER' 'NONE' 'ANY']\nThe unique instances of verification status are: ['Verified' 'Source Verified' 'Not Verified']\nThe unique instances of loan status are: ['Fully Paid' 'Charged Off']\nThe unique instances of loan purpose are: ['credit_card' 'car' 'small_business' 'other' 'wedding'\n 'debt_consolidation' 'home_improvement' 'major_purchase' 'medical'\n 'moving' 
'vacation' 'house' 'renewable_energy' 'educational']\nThe unique instances of address state are: ['AZ' 'GA' 'IL' 'CA' 'TX' 'VA' 'MO' 'CT' 'UT' 'FL' 'NY' 'PA' 'MN' 'NJ'\n 'OR' 'KY' 'OH' 'SC' 'RI' 'LA' 'MA' 'WA' 'WI' 'AL' 'NV' 'AK' 'CO' 'MD'\n 'WV' 'VT' 'MI' 'DC' 'SD' 'NC'[...]Now, mapping dictionary is set up to convert each unique instance from the features above to integers.mapping_dict = {\n \"term\": {\n \" 36 months\": 1,\n \" 60 months\": 0\n },\n \"grade\": {\n \"A\": 1,\n \"B\": 2,\n \"C\": 3,\n \"D\": 4,\n \"E\": 5,\n \"F\": 6,\n \"G\": 7\n },\n \"loan_status\": {\n \"Fully Paid\": 1, \n \"Charged Off\": 0\n }, \n \"emp_length\": {\n \"10+ years\": 11,\n \"9 years\": 10,\n \"8 years\": 9,\n \"7 years\": 8,\n \"6 years\": 7,\n \"5 years\": 6,\n \"4 years\": 5,\n \"3 years\": 4,\n \"2 years\": 3,\n \"1 year\": 2,\n \"< 1 year\": 1,\n \"nan\": 0\n },\n \"home_ownership\": {\n \"RENT\": 1,\n \"OWN\": 2,\n \"MORTGAGE\": 3,\n \"OTHER\": 4,\n \"NONE\": 5,\n \"ANY\": 6\n },\n \"verification_status\": {\n \"Verified\": 1,\n \"Source Verified\": 2,\n \"Not Verified\": 3\n },\n \"purpose\": {\n \"credit_card\": 1,\n \"car\": 2,\n \"small_business\": 3,\n \"other\": 4,\n \"wedding\": 5,\n \"debt_consolidation\": 6,\n \"home_improvement\": 7,\n \"major_purchase\": 8,\n \"medical\": 9,\n \"moving\": 10,\n \"vacation\": 11,\n \"house\": 12,\n \"renewable_energy\": 13,\n \"educational\": 14\n },\n \"addr_state\": {\n 'AZ': 1,'GA': 2, 'IL': 3, 'CA': 4, 'OR': 5, 'NC': 6, 'TX': 7, 'VA': 8, 'MO': 9, 'CT': 10, 'UT': 11, \n 'FL': 12, 'NY': 13, 'PA': 14, 'MN': 15, 'NJ': 16, 'KY': 17, 'OH': 18, 'SC': 19, 'RI': 20, 'LA': 21,\n 'MA': 22, 'WA': 23, 'WI': 24, 'AL': 25, 'CO': 26, 'KS': 27, 'NV': 28, 'AK': 29, 'MD': 30, 'WV': 31,\n 'VT': 32, 'MI': 33, 'DC': 34, 'SD': 35, 'NH': 36, 'AR': 37, 'NM': 38, 'MT': 39, 'HI': 40, 'WY': 41, \n 'OK': 42, 'DE': 43, 'MS': 44, 'TN': 45, 'IA': 46, 'NE': 47, 'ID': 48, 'IN': 49, 'ME': 50, 'ND': 51}\n}\n\nupdated_loans = updated_loans.replace(mapping_dict)\nupdated_loans.head()From the data shape below, we see there are now 242,863 rows in the cleaned dataset.In the previous section, we saw that the original dataset containing only \"Charged Off\" and \"Fully Paid\" loans contained 252,972 rows & 52 columns. Approx 4percent of rows with missing values were removed.print(\"The data shape of the cleaned dataset is: {}.\".format(updated_loans.shape))\nupdated_loans.head()The data shape of the cleaned dataset is: (242863, 17).\nFrom the heatmap figure below (and with ref to the data disctionary), we see the strongest positive correlation is between loan amount and installment (0.95). This is followed by strong positive correlation between the no. of currently open credit lines and the total no of credit lines (0.67).\nSome moderately positive correlations are between:\n* loan amount and total credit revolving balance (0.33)\n* installment and annual income (0.32)\n* installment and total credit revolving balance(0.32)\n* dti(debt to income) and no of open credit lines(0.31) \nHighest negative correlation is between loan grade and term (-0.47). This is followed by loan amount and term (-0.40). 
Some other moderately strong negative correlations are between:\n* loan amount and verification status (if income source was verified by lending club or not) (-0.39)\n* installment and verification status (-0.35)plt.figure(figsize=(10, 10))\nsns.heatmap(updated_loans.corr(), annot=True, fmt=\".2f\")\nplt.title(\"Heatmap of all cleaned data features correlations\");\nplt.show()\nupdated_loans.to_csv(\"data/cleaned_loan_data.csv\",index=False)### Section 5: Machine Learning Models\n\nTwo machine models are developed:\n**1. K-Nearest Neighbors (kNN) model**\n**2. Logistic Regression model**Begin by defining features and target variable for the models. In this case, **target variable (y**) is the **loan status**. Features variables (X) are all columns in the cleaned dataset (excluding target variable).\nThen, split the data into train/test sets and look at data shape.cleaned_data = pd.read_csv(\"data/cleaned_loan_data.csv\")\n\nX = cleaned_data.loc[:, cleaned_data.columns != \"loan_status\"]\ny = cleaned_data.loc[:, \"loan_status\"]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)\n\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)(194290, 16)\n(48573, 16)\n(194290,)\n(48573,)\n### KNN models\nWe will look at 3 different KNN models: \n1. KNN(n=1)\n2. KNN(n=10)\n3. KNN model with the best n value. **1. KNN (n=1) model.**We see that KNN(n=1) model has a 72% accuracy of correctly identifying new loans as good or bad loans. Index for nearest neighbour of observations are also listed.# instantiate the estimator\nknn = KNeighborsClassifier(n_neighbors=1)\n\n# fit the model\nknn.fit(X_train, y_train)\n\n# make a prediction\ny_pred = knn.predict(X_test)\nprint(\"Prediction for KNN(n=1) model is: {}.\".format(y_pred))\n\n# model score\nknn_score1 = metrics.accuracy_score(y_test, y_pred)\nprint(\"Accuracy score using KNN(n=1): %.2f%%\" % (knn_score1 * 100.0))\n\n# create 2 new observations and use these to make predictions with the model.\nobs1 = np.array( [8000, 1, 180, 3, 7, 2, 45000, 2, 1, 10, 11.3, 5, 0, 6843, 35, 27])\nobs1 = np.reshape(obs1, (1, -1))\nobs2 = np.array([2600, 0, 190, 2, 4, 1, 27600, 3, 11, 5, 4, 6, 0, 1847.3, 23, 12.0])\nobs2 = np.reshape(obs2, (1, -1))\nprint(\"The outcome prediction for 1st new loan observation using KNN(n=1) is: {}.\".format(knn.predict(obs1)))\nprint(\"The nearest neighbor for 1st new loan observation using KNN(n=1) is at: {}.\".format(knn.kneighbors(obs1)[1]))\nprint(\"The outcome prediction for 2nd new loan observation using KNN(n=1) is: {}.\".format(knn.predict(obs2)))\nprint(\"The nearest neighbor for 2nd new loan observation using KNN(n=1) is at: {}.\".format(knn.kneighbors(obs2)[1]))Prediction for KNN(n=1) model is: [1 1 1 ... 1 1 0].\nAccuracy score using KNN(n=1): 71.93%\nThe outcome prediction for 1st new loan observation using KNN(n=1) is: [1].\nThe nearest neighbor for 1st new loan observation using KNN(n=1) is at: [[73873]].\nThe outcome prediction for 2nd new loan observation using KNN(n=1) is: [1].\nThe nearest neighbor for 2nd new loan observation using KNN(n=1) is at: [[76083]].\n**2. KNN (n=5) model**We see that the KNN(n=15) model has a higher accuracy (80%) of predicting good and bad loans than KNN(n=1).It also predicts both new loan observations as being good loans. 
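(Aside, not part of the original workflow: KNN is distance based, so features with large ranges such as the loan amount and annual income dominate unscaled Euclidean distances. A scaled variant is sketched below for comparison with the unscaled models in this section; the pipeline and whatever accuracy it prints are illustrative assumptions, not results reported by this notebook.)
# Hedged sketch: standardise features before KNN and compare against the unscaled fits below.
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier

scaled_knn = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
scaled_knn.fit(X_train, y_train)
print("Scaled KNN(n=5) accuracy: %.2f%%" % (scaled_knn.score(X_test, y_test) * 100.0))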
knn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train,y_train)\ny_pred = knn.predict(X_test)\nprint(\"Prediction for KNN(n=10) model is: {}.\".format(y_pred))\nknn_score5 = metrics.accuracy_score(y_test, y_pred)\nprint(\"Accuracy score using KNN(n=10): %.2f%%\" % (knn_score5 * 100.0))\n\n# Use the same observations created above to make predictions for new loans using KNN(n=10) model.\nprint(\"The outcome prediction for first new loan observation using KNN(n=5) is: {}.\".format(knn.predict(obs1)))\nprint(\"The 5 nearest neighbors for 1st new loan observation using KNN(n=5) are at: {}.\".format(knn.kneighbors(obs1)[1]))\nprint(\"The outcome prediction for second new loan observation using KNN(n=5) is: {}.\".format(knn.predict(obs2)))\nprint(\"The 5 nearest neighbors for 2nd new loan observation using KNN(n=5) are at: {}.\".format(knn.kneighbors(obs2)[1]))Prediction for KNN(n=10) model is: [1 1 1 ... 1 1 1].\nAccuracy score using KNN(n=10): 79.68%\nThe outcome prediction for first new loan observation using KNN(n=5) is: [1].\nThe 5 nearest neighbors for 1st new loan observation using KNN(n=5) are at: [[ 73873 31070 184436 140641 51238]].\nThe outcome prediction for second new loan observation using KNN(n=5) is: [1].\nThe 5 nearest neighbors for 2nd new loan observation using KNN(n=5) are at: [[76083 9433 95894 48337 55575]].\n#### Plot all accuracy scores for KNN models, for k values ranging from 1 to 15. Highest accuracy score is obtained when n=11.k_range = range(1, 15)\nscores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n knn_score_total = metrics.accuracy_score(y_test, y_pred)\n scores.append(knn_score_total)\n\nplt.plot(k_range, scores)\nplt.xlabel(\"Value of n for KNN\")\nplt.ylabel(\"Testing accuracy\")**3. KNN (n=13) model**Based of the graph above, highest accuracy is when n=13. We created a KNN (n=13) model and calculate its accuracy score. Predictions are also made using the same 2 new observations created above.This model is a slightly higher accuracy rate (82%) than KNN(n=5), of correctly identifying whether new loans are good or bad. It also predicts both new observations are being good loans. knn = KNeighborsClassifier(n_neighbors=13)\nknn.fit(X_train,y_train)\ny_pred = knn.predict(X_test)\nprint(\"Prediction for KNN(n=13) model is: {}.\".format(y_pred))\nknn_score11 = metrics.accuracy_score(y_test, y_pred)\nprint(\"Accuracy score using KNN(n=13): %.2f%%\" % (knn_score11 * 100.0))\n\n# Use the same observations created above to make predictions for new loans using KNN(n=10) model.\nprint(\"The outcome prediction for first loan observation using KNN(n=13) is: {}.\".format(knn.predict(obs1)))\nprint(\"The 13 nearest neighbors for 1st new loan observation using KNN(n=13) are at: {}.\".format(knn.kneighbors(obs1)[1]))\nprint(\"The outcome prediction for second loan observation using KNN(n=13) is: {}.\".format(knn.predict(obs2)))\nprint(\"The 13 nearest neighbors for 2nd new loan observation using KNN(n=13) are at: {}.\".format(knn.kneighbors(obs2)[1]))\n# compare actual values to those predicted by the KNN(n=13) model.\ndf_knn = pd.DataFrame({\"Actual\": y_test, \"Predicted\": y_pred})\ndf_knn.sample(n=10)**Confusion matrix for KNN model**\nFrom the figures below, we can see that this is a biased model. It has a very high sensitivity score, showing that it's able to predict good loans correctly 99% of the time. 
However, it is likely to predict bad loans correctly Model has a 18% chance of misclassifying loans as good or bad. Loans predicted as being good loans have approx 83% chance of being good loans.cm = metrics.confusion_matrix(y_test, y_pred)\nprint(\"Confusion matrix for the KNN(n=11):\")\nprint(cm)\n\nTP = cm[1, 1]\nTN = cm[0, 0]\nFP = cm[0, 1]\nFN = cm[1, 0]\n\nsensitivity = TP / float(TP + FN)\nprint(\"Sensitivity: %.2f%%\" % (sensitivity * 100))\n\nprecision = TP / float(TP + FP)\nprint(\"Precision: %.2f%%\" % (precision * 100))\n\nerror = (FP + FN) / (TP + TN + FP + FN)\nprint(\"Error: %.2f%%\" % (error * 100))\n\nspecificity = TN / float(TN + FP)\nprint(\"Specificity: %.2f%%\" % (specificity * 100))Confusion matrix for the KNN(n=11):\n[[ 159 8374]\n [ 434 39606]]\nSensitivity: 98.92%\nPrecision: 82.55%\nError: 18.13%\nSpecificity: 1.86%\n**ROC curve and AUC score for KNN** \nFrom the close proximity of the ROC curve to the diagonal line and from the AUC score, it can be interpreted that test accuracy of this model is only a little better that chance.# ROC curve\ny_pred_proba = knn.predict_proba(X_test)[:,1]\nfpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)\nplt.plot([0,1],[0,1],'k--')\nplt.plot(fpr,tpr, label='Knn')\nplt.xlabel('fpr')\nplt.ylabel('tpr')\nplt.title('Knn(n_neighbors=13) ROC curve')\nplt.show()\n\n# Area under ROC curve\nroc_knn = roc_auc_score(y_test,y_pred_proba)\nprint(\"Area under curve score for KNN(n=13)model is: %0.2f%%\" % (roc_knn *100))### Logistic Regression model\nWe see that the logistic regression model a marginally higher accuracy of making correct predictions of loan status,\ncompared to that of the KNN model with the best accuracy score (n=13). It also predicts both new loan observations as being good loans.# instantiate logistic regression.\nlogreg = LogisticRegression()\n\n# fit logistic regression model to the training set.\nlogreg.fit(X_train, y_train)\n\n# to predict the test set results.\ny_pred_class = logreg.predict(X_test)\nprint(\"Prediction for the logistic regression model is: {}\".format(y_pred_class))\n\n# model score\nlm_accuracy = metrics.accuracy_score(y_test, y_pred_class)\nprint(\"Accuracy score using Logistic Regression: %.2f%%\" % (lm_accuracy * 100.0))\n\n# Use the same observations created above for KNN models to make predictions for new loans using the logreg model.\nprint(\"The outcome prediction for first loan observation using logreg model is: {}.\".format(logreg.predict(obs1)))\nprint(\"The outcome prediction for second loan observation using logreg model is: {}.\".format(logreg.predict(obs2)))\n# compare actual values to those predicted by the KNN(n=13) model.\ndf_log = pd.DataFrame({\"Actual\": y_test, \"Predicted\": y_pred_class})\ndf_log.sample(n=10)**Confusion matrix for logistic regression model**\nFrom the figures below, we can see that this is a biased model. It has a very high sensitivity score, showing that it's able to predict good loans correctly 99% of the time. However, it is likely to predict bad loans correctly Model has a 18% chance of misclassifying loans as good or bad. 
Loans predicted as being good loans have approx 83% chance of being good loans.cm_log = metrics.confusion_matrix(y_test, y_pred_class)\nprint(\"Confusion matrix for Logistic Regression model:\")\nprint(cm_log)\n\nTP = cm_log[1, 1]\nTN = cm_log[0, 0]\nFP = cm_log[0, 1]\nFN = cm_log[1, 0]\n\nsensitivity = TP / float(TP + FN)\nprint(\"Sensitivity: %.2f%%\" % (sensitivity * 100))\n\nprecision = TP / float(TP + FP)\nprint(\"Precision: %.2f%%\" % (precision * 100))\n\nerror = (FP + FN) / (TP + TN + FP + FN)\nprint(\"Error: %.2f%%\" % (error * 100))\n\nspecificity = TN / float(TN + FP)\nprint(\"Specificity: %.2f%%\" % (specificity * 100))Confusion matrix for Logistic Regression model:\n[[ 6 8527]\n [ 12 40028]]\nSensitivity: 99.97%\nPrecision: 82.44%\nError: 17.58%\nSpecificity: 0.07%\n**ROC curve and AUC score for logistic regression**\nTest accuracy of this model at 58.6%, is a little higher than that for KNN(n=13). The logistic regression model is the slightly better model of the two models.# ROC curve\ny_pred_proba_log = logreg.predict_proba(X_test)[:,1]\nfpr, tpr, thresholds = roc_curve(y_test, y_pred_proba_log)\nplt.plot([0,1],[0,1],'k--')\nplt.plot(fpr,tpr, label='Logistic Regression')\nplt.xlabel('fpr')\nplt.ylabel('tpr')\nplt.title('Logistic Regression ROC curve')\nplt.show()\n\n# Area under ROC curve\nroc_log = roc_auc_score(y_test,y_pred_proba_log)\nprint(\"Area under curve score for logistic regression model is: %0.2f%%\" % (roc_log *100))"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/loan-credit.ipynb"},"repo_name":{"kind":"string","value":"manieshablakey/loan-assessment-model"},"chain_length":{"kind":"number","value":47,"string":"47"}}},{"rowIdx":4892,"cells":{"content":{"kind":"string","value":"## Condition sınıfının cinsiyet karşılaştırılmasıdata.iloc[:23][\"gender\"].value_counts().plot.barh();\nplt.title(\"Cinsiyet karşılaştırma\")\nplt.xlabel(\"Veri sayısı\")\nplt.ylabel(\"cinsiyetler (1 k, 2 e)\")\ndata.iloc[:23][\"gender\"].value_counts().plot.pie();## Control grubunun cinsiyet karşılaştırılmasıdata.iloc[23:][\"gender\"].value_counts().plot.barh();\nplt.title(\"Cinsiyet karşılaştırma\")\nplt.xlabel(\"Veri sayısı\")\nplt.ylabel(\"cinsiyetler (1 k, 2 e)\")\ndata.iloc[23:][\"gender\"].value_counts().plot.pie();## tedavi sonuçlarını birbirinden çıkarıp etkilerine bakalımdata[\"madrs3\"]=data[\"madrs1\"]-data[\"madrs2\"]## Çıkan sonuçlara acaba gün sayısı etki ediyor mu ?sbn.scatterplot(x=\"madrs3\",y=\"days\",data=data)#### Gün arttıkça tedavinin olumlu yanıtlar verdiği söylenebilir## acaba cinsiyetler tedavi sürecinde etkili mi ?sex=[\"man\" if i==2 else \"woman\" for i in data.gender]\ndata[\"sex\"]=sex\nsbn.scatterplot(x=\"madrs3\",y=\"days\",hue=\"sex\",data=data)\nsbn.barplot(x=\"days\",y=\"madrs3\",hue=\"sex\",data=data);\nplt.legend(loc=2)## Yaş aralığına göre hastalık görülme durumuplt.plot(data[\"age\"].unique(),data.groupby(\"age\")[\"gender\"].count(),\"b-*\")\nplt.title(\"Bütün veri seti sonucu oluşan grafik\")\nplt.xlabel(\"yaş aralıkları\")\nplt.ylabel(\"hastalık görülme sayısı\")##### conditional ve control grubunun ayrı ayrı analiz edilmesiplt.plot(data.iloc[:23][\"age\"].unique(),data.iloc[:23].groupby(\"age\")[\"gender\"].count(),\"g-*\")\nplt.title(\"Condition grubu\")\nplt.xlabel(\"yaş aralıkları\")\nplt.ylabel(\"hastalık görülme sayısı\")\nplt.plot(data.iloc[23:][\"age\"].unique(),data.iloc[23:].groupby(\"age\")[\"gender\"].count(),\"r-*\")\nplt.title(\"Control grubu\")\nplt.xlabel(\"yaş 
aralıkları\")\nplt.ylabel(\"hastalık görülme sayısı\")## Yaş aralıklarının cinsiyetlere göre dağılımısbn.barplot(x=\"age\",y=data.age.index,hue=\"sex\",data=data);\nplt.xlabel(\"yaş aralığı\")\nplt.title(\"Yaş aralıklarının cinsiyetlere göre dağılımı\")## madrs3 değişkeninin verilerle olan korelasyonu (sayısal ve grafiksel gösterimi)data.corr()[\"madrs3\"]\ndata.corr()[\"madrs3\"].sort_values().plot.barh();\n## okul ## conditional grubu hastaların eğitim durumu dağılımıdata.iloc[:23][\"edu\"].value_counts()## eğitim durumu ile hastalık orantılı mı ?sbn.catplot(x=\"edu\",y=\"madrs3\",data=data);#### cinsiyeti 3. boyut olarak eklersek acaba eğitim durumuyla cinsiyet arasında ilişki var mı ?sbn.catplot(x=\"edu\",y=\"madrs3\",hue=\"sex\",data=data);\n## \ndata.head()\ndata[\"cat_afftype\"]=data[\"afftype\"]\ncounter=0\nfor i in data[[\"afftype\"]].values:\n if(i==1):\n data[\"cat_afftype\"].iloc[counter]=\"bipolarr\"\n elif(i==2):\n data[\"cat_afftype\"].iloc[counter]=\"unipolar_depresif\"\n elif(i==3):\n data[\"cat_afftype\"].iloc[counter]=\"bipolar\"\n counter+=1\ndata.head()\ndata.cat_afftype=data.cat_afftype.astype(pd.CategoricalDtype())\ndata.info()\ndata.cat_afftype.value_counts()\ndata[\"afftype\"].value_counts()\nsbn.catplot(x=\"cat_afftype\",y=\"madrs3\",data=data,hue=\"sex\");\nsbn.violinplot(x=\"sex\",y=\"madrs3\",data=data)\nsbn.barplot(x=\"cat_afftype\",y=\"madrs3\",hue=\"sex\",data=data)\ndata.head()# çalışmak ve evlilik durumları depresyonu tedavisi sonucunu etkiler mi ?sbn.barplot(x=\"work\",y=\"madrs3\",hue=\"marriage\",data=data)\nplt.legend(loc=4)## yaş grupları ile evlilik durumları depresyon tedavisi sonucunu etkiler mi ?sbn.barplot(x=\"age\",y=\"madrs3\",hue=\"marriage\",data=data)\nplt.legend(loc=4)\ndata[\"age\"]=data[\"age\"].astype(pd.CategoricalDtype(categories=['20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',\n '55-59', '60-64', '65-69'],ordered=True))\ndata.age.iloc[:5]\nliste=[]\nfor i in data.age.cat.codes:\n if(0<=i<3):\n liste.append(0)\n elif(i<5):\n liste.append(1)\n elif(i<7):\n liste.append(2)\n else: liste.append(3)\ndata[\"new_age\"]=liste## Yaşları kategorize edip bunun dağılımını ve cinsiyetin bu dağılıma etkisini inceleyelimplt.figure(figsize=(12,6))\nplt.subplot(1,2,1)\nsbn.barplot(x=\"new_age\",y=\"madrs3\",hue=\"marriage\",data=data)\nplt.subplot(1,2,2)\nsbn.barplot(x=\"new_age\",y=\"madrs3\",hue=\"sex\",data=data)\nplt.legend(loc=4)## Ayakta tedavi görmek acaba yatarak tedavi görmekten daha mı iyi sonuç veriyor ?sbn.boxplot(x=\"inpatient\",y=\"madrs3\",data=data)\nplt.grid()## Acaba cinsiyet üzerinde bu ddağılım değişiyor mu ?sbn.boxplot(hue=\"sex\",x=\"inpatient\",y=\"madrs3\",data=data)\nplt.grid()## Eğitim seviyesinin tedavi yöntemine etkisinin incelenmesisbn.boxplot(hue=\"edu\",x=\"inpatient\",y=\"madrs3\",data=data)\nplt.grid()## tedavi yöntemlerinin dağılımıdata[\"inpatient\"].value_counts().plot.pie()## Eğitim seviyesinin tedavi yöntemleri üzerine dağılımısbn.countplot(x=\"inpatient\",hue=\"edu\",data=data)## Tedavisi olumlu oluşanların incelenmesipoz_result=data[data.madrs3>0]\nsta_result=data[data.madrs3==0]\nneg_result=data[data.madrs3<0]\npoz_result\nplt.figure(figsize=(12,6))\nplt.subplot(1,2,1)\nsbn.barplot(x=\"new_age\",y=\"madrs3\",data=data);\nplt.title(\"kategorik yaş sınıflandırmasının dağılımı\")\nplt.subplot(1,2,2)\nplt.title(\"cinsiyetler üzerine dağılım\")\nsbn.barplot(x=\"new_age\",y=\"madrs3\",hue=\"sex\",data=data);\nplt.figure(figsize=(12,6))\nplt.subplot(1,2,1)\nplt.title(\"evliliğin üzerine 
dağılım\")\nsbn.barplot(x=\"new_age\",y=\"madrs3\",hue=\"marriage\",data=data);\nplt.subplot(1,2,2)\nplt.title(\"çalışma stilinin veri üzerine dağılım\")\nsbn.barplot(x=\"new_age\",y=\"madrs3\",hue=\"work\",data=data);## Stabil kalan hastaların incelenmesista_result## Negatif kalan hastaların incelenmesineg_result"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Depresyon Analizi.ipynb"},"repo_name":{"kind":"string","value":"omermacitt/Patika_"},"chain_length":{"kind":"number","value":23,"string":"23"}}},{"rowIdx":4893,"cells":{"content":{"kind":"string","value":"# ! to change to command line environment \n!pip install bing-image-downloader\n\n! mkdir images\nfrom bing_image_downloader import downloader\ndownloader.download('monkey animal', limit=20, output_dir='images')\ndownloader.download('pigeon bird', limit=20, output_dir='images')\ndownloader.download('butterfly insect', limit=20, output_dir='images')\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\nDATA = '/content/images'\ncategory = ['monkey animal','pigeon bird','butterfly insect']\nflat_data =[]\ntarget = []\n\nfor i in category:\n path = os.path.join(DATA,i)\n print(path)\n for img in os.listdir(path):\n img_array = imread(os.path.join(path,img)) #showing images\n # print(img_array.shape)\n img_resized = resize(img_array,(150,150,3)) # 0-255 values will be normalised (0-1)\n flat_data.append(img_resized.flatten())\n target.append(i)\n\nflat_data\n\n#os.listdir(path)\n\n#target\nplt.imshow(img_resized)\nflat_data[0]\n#imread('/content/images/monkey animal/Image_1.jpg')\nplt.imshow?\n\nimport pandas as pd\n\ndf = pd.DataFrame(flat_data)\ndf\ndf['Target'] = target\nx = df.drop('Target',axis=1).values\ny = df['Target'].values\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x,y, random_state = 0)\nx_test.shape\nfrom sklearn.svm import SVC \nmodel = SVC()\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = {'C': [1,10,100,1000],'kernel': ['rbf','linear','poly'],'gamma': [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] }\n\ngrid = GridSearchCV(SVC(),param_grid)\n\ngrid = grid.fit(x_train,y_train)\ngrid.best_score_\ngrid.best_params_\npd.DataFrame(grid.cv_results_)\nmodel = SVC(kernel='rbf')\nmodel.fit(x_train,y_train)\ny_pred = model.predict(x_test)\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\naccuracy_score(y_pred,y_test)\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.svm import SVC \nmodel = SVC()\nmodel.fit(x_train,y_train)\ny_pred = model.predict(x_test)\naccuracy_score(y_pred,y_test)\nfrom sklearn.preprocessing import StandardScaler\nstand = StandardScaler()\nx_train = stand.fit_transform(x_train)\nx_test = stand.transform(x_test)\nmodel = LogisticRegression()\nmodel.fit(x_train,y_train)\ny_pred = model.predict(x_test)\naccuracy_score(y_pred,y_test)\ny_test[0]\ny_pred[0]\nimg = imread('/content/test1.jpg')\nplt.imshow(img)\nx_test[0]\nimg = resize(img,(150,150,3))\nimg_d = img.flatten()\nimg_d = 
img_d.reshape(1,-1)\nimg_d\n\nmodel.predict(img_d)\nconfusion_matrix(y_pred,y_test)\nprint(classification_report(y_pred,y_test))\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Minor_project.ipynb"},"repo_name":{"kind":"string","value":"Navya-89/Mini-Project"},"chain_length":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":4894,"cells":{"content":{"kind":"string","value":"# Starbucks Capstone Challenge\n\n### Introduction\n\nThis data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. \n\nNot all users receive the same offer, and that is the challenge to solve with this data set.\n\nYour task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.\n\nEvery offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.\n\nYou'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. \n\nKeep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.\n\n### Example\n\nTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.\n\nHowever, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the \"buy 10 dollars get 2 dollars off offer\", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.\n\n### Cleaning\n\nThis makes data cleaning especially important and tricky.\n\nYou'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. 
You'll want to try to assess what a certain demographic group will buy when not receiving any offers.\n\n### Final Advice\n\nBecause this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A).# Data Sets\n\nThe data is contained in three files:\n\n* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)\n* profile.json - demographic data for each customer\n* transcript.json - records for transactions, offers received, offers viewed, and offers completed\n\nHere is the schema and explanation of each variable in the files:\n\n**portfolio.json**\n* id (string) - offer id\n* offer_type (string) - type of offer ie BOGO, discount, informational\n* difficulty (int) - minimum required spend to complete an offer\n* reward (int) - reward given for completing an offer\n* duration (int) - time for offer to be open, in days\n* channels (list of strings)\n\n**profile.json**\n* age (int) - age of the customer \n* became_member_on (int) - date when customer created an app account\n* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)\n* id (str) - customer id\n* income (float) - customer's income\n\n**transcript.json**\n* event (str) - record description (ie transaction, offer received, offer viewed, etc.)\n* person (str) - customer id\n* time (int) - time in hours since start of test. The data begins at time t=0\n* value - (dict of strings) - either an offer id or transaction amount depending on the record\n\n**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. \n\nYou can see how to access the terminal and how the install works using the two images below. 
First you need to access the terminal:\n\n\n\nThen you will want to run the above command:\n\n\n\nFinally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.import pandas as pd\nimport numpy as np\nimport math\nimport json\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import figure\n%matplotlib inline\n\n\n# read in the json files\nportfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)\nprofile = pd.read_json('data/profile.json', orient='records', lines=True)\ntranscript = pd.read_json('data/transcript.json', orient='records', lines=True)## Data Cleaning & Preprocessing# View the portfolio table for any issues\nportfolioThe portfolio data is all visible above with no obvious issues to resolve.# Determine how many records there are in the profile data\nprofile.shapeThe profile dataset is larger and will require some more tools to clean# check for missing values\nprofile.isna().sum()\n# check for duplicates\nprofile.duplicated().sum()No duplicates but 2175 missing income values. These will be dropped because the income will be used as a predictor in the model for this dataset and imputing the income based on the other data available would not be useful to training the model.# drop nulls and check \nprofile = profile.dropna()\nprofile.isna().sum()\n# check the new number of records \nprofile.shape\n# check for any customers under 18\nprofile[profile['age']<18].agg('count')\n# check for any customers over 90\nprofile[profile['age']>90].agg('count')\n# get the frequency distribution of the people over 90 \nprofile[profile['age']>90].groupby(['age']).agg('count')There are a lot of customers over 90 but given the dataset has 17000 customers, 219 of them being over 90 years old is not unrealistic. Nor is it unrealistic that 5 people could be 101 years old. This information will be retained for the model # get number of records in the transcript data table\ntranscript.shape\n# extract the value column which has objects in each row\nvalues_df = transcript['value']\n# convert the json objects in each row to a PD series\nvalues_df = values_df.apply(pd.Series)\n# merge the new values series to the existing table and drop the original value column\ntranscript_clean = pd.concat([transcript.drop('value', axis=1),values_df],axis=1)\n# check the result\ntranscript_clean\n# check how many null items are in offer_id\ntranscript_clean['offer_id'].isna().sum()\n# the recieved offers use a different column name, offer id without the underscore.\n# fill the nulls from the offer_id column with the values from offer id to merge into one row\ntranscript_clean['offer_id'].fillna(transcript_clean['offer id'], inplace = True)\n# check how many nulls are in offer_id after the fill\ntranscript_clean['offer_id'].isna().sum()\n# drop the redundant columns \ntranscript_cleaning = transcript_clean.drop(['offer id','amount','reward'], axis=1)\n# the remaining columns are person, event, offer_id and time. 
\n# The remaining nulls are in the offer_id field where the event is transaction, so these rows will be dropped\ntranscript_cleaned = transcript_cleaning.dropna()\n# checking the final cleaned table\ntranscript_cleaned\n# subsets from the cleaned table will be created for received, viewed and completed offers below \n\nreceived_df = transcript_cleaned[transcript_cleaned['event']=='offer received'].sort_values(by='person')\nviewed_df = transcript_cleaned[transcript_cleaned['event']=='offer viewed'].sort_values(by='person')\ncompleted_df = transcript_cleaned[transcript_cleaned['event']=='offer completed'].sort_values(by='person')\n# create a completed_offers field and initialise to 0. This will be the binary classifier \ncompleted_df['completed_offers']=0\n# create a list of every person from the profile table\nperson_list = [i for i in profile['id'].unique()]\nlen(person_list)\n# some customers received the same offer more than once. Drop all but the first of each unique received offer\ncompleted_df = completed_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person')\n# as above but for viewed offers\nviewed_df = viewed_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person')\n# as above for received offers. This also includes informational offers which will not be used for modelling\nreceived_df = received_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person')\ncompleted_df\n# Merge the completed offers and viewed offers dataframes, preserving the completed offers rows\ncombined_df = pd.merge(left=completed_df, right=viewed_df, how='left', on=['person','offer_id'])\ncombined_df\n# Not all customers and offer combinations are in the viewed offers data, where they are missing\n# the time will be filled with a large value, 1000\ncombined_df['time_y'].fillna(1000)\n# Create a new boolean array to see if the offer completed time is more than or equal to the view time\ncompleted_offers = combined_df['time_x']>=combined_df['time_y']\ncompleted_offers\n# Convert the boolean to 0 and 1\ncompleted_offers.map({False: 0, True:1})\n# apply the mapping to the original mapping column \ncombined_df['completed_offers']=completed_offers.map({False: 0, True:1})\ncombined_df\n# pivot the table to see each offer, for visualisation of the table only\ncombined_df.pivot(index='person', columns=['offer_id'], values=['completed_offers'])\n# construct a dictionary of offer type for each ID \noffer_dict = {}\nfor i in range(len(portfolio)):\n offer_dict[portfolio['id'].iloc[i]] = portfolio['offer_type'].iloc[i]\noffer_dict\n# map the offer type based on the ID, in the combined dataframe \ncombined_df['offer_id'] = combined_df['offer_id'].map(offer_dict)\n# create an inverted flag of the completed offers, for the sorting step in the next cell\ncombined_df['offer_flag'] = 1-combined_df['completed_offers']\n# sort by person, offer ID and offer flag ascending, and drop all duplicate offers. 
\n# the offer ascending flag will start with 0, this is where completed_offers is 1,\n# thus ensuring we dont drop a completed offer is one exists\noffers_df=combined_df.sort_values(by=['person','offer_id','offer_flag']).drop_duplicates(subset=['person','offer_id']).sort_values(by='person')\n# the new table has a customer ID with one of each offer (BOGO and discount)\n# pivot the table, re-insert the person ID as a column and fill NaN classes with 0\npivoted_offers = offers_df.pivot(index='person', columns=['offer_id'], values=['completed_offers'])\npivoted_offers['id'] = pivoted_offers.index\npivoted_offers = pivoted_offers.fillna(0)\n# merge the pivoted table with the profile table for the customers we retained.\nprofile_offers_df = pd.merge(left = pivoted_offers, right=profile, how='right', on='id')\n# check all 14,825 customers are retained with the new columns added \nprofile_offers_df\n# rename the columns\nprofile_offers_df.columns = ['id','bogo','discount','id2','gender','age','became_member_on','income']\n# construct the final dataframe, dropping the redundant columns\nfinal_df = profile_offers_df.drop(['id2','gender','became_member_on'],axis=1)\n# fill the remaining NaN values with 0 (for customers who didnt complete any offers)\nfinal_df = final_df.fillna(0)\n# save the file \nfinal_df.to_csv('modelling_data.csv')## Analysis\n\nThe final_df dataframe now has the information we need for modelling. the bogo and discount classes are identified for each person by id, along with their age and income. # Load the file (if returning to this later)\nfinal_df = pd.read_csv('modelling_data.csv')\n# Plot the freq. distribution for customer age\nfigure(figsize=(10,7))\nplt.bar(profile.groupby('age').agg('count').index,profile.groupby('age').agg('count')['gender'])\nplt.xlabel('Age')\nplt.ylabel('Frequency')\nplt.title('Customer Age Distribution')\n# plot the distribution for customer salary\nfigure(figsize=(10,7))\nplt.plot(profile.groupby('income').agg('count').index,profile.groupby('income').agg('count')['gender'])\nplt.xlabel('Salary')\nplt.ylabel('Frequency')\nplt.title('Customer Salary Distribution')\n# Summarise the statistics of each class and the proportion of data in each class\n\nbogo1 = final_df[final_df['bogo']==1]['bogo'].agg('count')\nbogo0 = final_df[final_df['bogo']==0]['bogo'].agg('count')\ndisc1 = final_df[final_df['discount']==1]['discount'].agg('count')\ndisc0 = final_df[final_df['discount']==0]['discount'].agg('count')\nprint('There are %d customers who viewed and completed BOGO and %d customers who did not view to complete the offer.\\n\\\nThere are %d customers who viewed and completed discount and %d customers who did not view to complete the offer. 
\\n\\\nThis gives us a fraction of %f and %f of BOGO and discount offers compeleted, respectively' %(bogo1,bogo0,disc1,disc0,(bogo1/(bogo1+bogo0)),(disc1/(disc1+disc0))))\n# plot the BOGO classes for all customers using the age and salary\n\nfigure(figsize=(12,10))\nplt.scatter(final_df['age'],final_df['income'],s=5,c=final_df['bogo'],cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# plot the discount classes using age and salary\n\nfigure(figsize=(12,10))\nplt.scatter(final_df['age'],final_df['income'],s=5,c=final_df['discount'],cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')## Modellingfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import StandardScaler\n# define the X and Y data from the final dataframe \nXData = final_df.drop(['id','bogo','discount'], axis=1)\n#XData['gender']=XData['gender'].map({'F':0,'M':1,'O':2})\nYData = final_df.drop(['id','age','income'],axis=1)\n# Scale the X data so the salary magnitude doesn't over-influence the fitting\nscaler = StandardScaler()\nscaler.fit(XData)\nXscaled = scaler.transform(XData)\n# split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(Xscaled, YData, test_size=0.33, random_state=42)\n# apply a logistic regression model for both offer types, then get the predictions and print the classification report\nLRmodelBOGO = LogisticRegression()\nLRmodelBOGO.fit(X_train,y_train['bogo'])\nLRmodelDisc = LogisticRegression()\nLRmodelDisc.fit(X_train,y_train['discount'])\n\ny_bogo_pred = LRmodelBOGO.predict(X_test)\ny_disc_pred = LRmodelDisc.predict(X_test)\nprint(classification_report(y_test['bogo'],y_bogo_pred))\nprint(classification_report(y_test['discount'],y_bogo_pred))\n# plot the training classes in the new scaled X space for BOGO\nfigure(figsize=(12,10))\nplt.scatter(X_train[:,0],X_train[:,1],s=5,c=y_train['bogo'],cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# plot the training classes for the new scaled X space for discounts\nfigure(figsize=(12,10))\nplt.scatter(X_train[:,0],X_train[:,1],s=5,c=y_train['discount'],cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# plot the actual test classes for BOGO with the scaled test inputs\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_test['bogo'],cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Actual Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# plot the predicted BOGO classes from the LR model for the test input\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# Plot the LR predicted classes for discount using the test input\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# plot the actual discount classes for the test 
data\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_test['discount'],cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Actual Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# fit a random forest classifier with training data, then predict the classes and print report\nDTmodelBOGO = RandomForestClassifier(n_estimators=200)\nDTmodelBOGO.fit(X_train,y_train['bogo'])\nDTmodelDisc = RandomForestClassifier(n_estimators=200)\nDTmodelDisc.fit(X_train,y_train['discount'])\n\ny_bogo_pred = DTmodelBOGO.predict(X_test)\ny_disc_pred = DTmodelDisc.predict(X_test)\nprint(classification_report(y_test['bogo'],y_bogo_pred))\nprint(classification_report(y_test['discount'],y_bogo_pred))\n# random forest classifier BOGO predictions plot\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# random forest classifier discount predictions plot\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# as above but for the SVC models\nSVCmodelBOGO = SVC(kernel='rbf')\nSVCmodelBOGO.fit(X_train,y_train['bogo'])\nSVCmodelDisc = SVC(kernel='rbf')\nSVCmodelDisc.fit(X_train,y_train['discount'])\n\ny_bogo_pred = SVCmodelBOGO.predict(X_test)\ny_disc_pred = SVCmodelDisc.predict(X_test)\nprint(classification_report(y_test['bogo'],y_bogo_pred))\nprint(classification_report(y_test['discount'],y_bogo_pred))\n# SVC BOGO test data predictions \nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# SVC discount test data predictions\nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# as above but for KNN\nKNmodelBOGO = KNeighborsClassifier(n_neighbors=3)\nKNmodelBOGO.fit(X_train,y_train['bogo'])\nKNmodelDisc = KNeighborsClassifier(n_neighbors=3)\nKNmodelDisc.fit(X_train,y_train['discount'])\n\ny_bogo_pred = KNmodelBOGO.predict(X_test)\ny_disc_pred = KNmodelDisc.predict(X_test)\nprint(classification_report(y_test['bogo'],y_bogo_pred))\nprint(classification_report(y_test['discount'],y_bogo_pred))\n# KNN bogo test data predictions \nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper')\nplt.colorbar()\nplt.title('Customer BOGO Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')\n# KNN discount test data predictions \nfigure(figsize=(12,10))\nplt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter')\nplt.colorbar()\nplt.title('Customer Discount Prediction Classes')\nplt.xlabel('Age')\nplt.ylabel('Salary')"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/Starbucks_Capstone_notebook.ipynb"},"repo_name":{"kind":"string","value":"ravp90/UdacityStarbucksCapstone"},"chain_length":{"kind":"number","value":8,"string":"8"}}},{"rowIdx":4895,"cells":{"content":{"kind":"string","value":" Binary classification as a phase separation process\nRafael Monteiro Mathematics for advanced Materials - Open Innovation Lab (MathAM-OIL, AIST), Sendai, Japan\nemail : monteirodasilva-rafael@aist.jp, rafael.a.monteiro.math@gmail.com\n\n\nThis 
is a companion website to the article **Binary classification as a phase separation process**,\nby [Rafael Monteiro](https://sites.google.com/view/rafaelmonteiro-math/home). A preprint is available on arXiv https://arxiv.org/abs/2009.02467.\n\nFor the dataset and trainable models, see the data repository at Zenodo.\n\n## A few examples\n\nTo begin with, I will introduce the model, giving some examples of its use. Let's first import some librariesfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pl\nimport numpy as np\nimport scipy.sparse as sc\nimport sympy\nimport itertools as it\n###-----------------------------------\n## The modules for this paper are here\nfrom binary_phase_separation import *\nfrom aux_fnts_for_jupyter_notebooks import *\n###-----------------------------------\n### In order to open ans save dictionaries\ntry:\n import cPickle as pickle\nexcept ImportError: # python 3.x\n import pickle\n\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(action = \"ignore\", message = \"internal issue\")\n\n## Things necessary to do nice plots\nfrom matplotlib.ticker import ScalarFormatter, FuncFormatter\nfrom matplotlib.transforms import Affine2D\nfrom matplotlib import rcParams\nplt.rc('axes', labelsize = 18)\nplt.rc('xtick', labelsize = 16)\nplt.rc('ytick', labelsize = 16)\nplt.rc('font', size = 20)\nplt.rc('grid', alpha = 0.6)\nplt.rc('legend', fontsize = 18)\nrcParams['font.family'] = \"Times New Roman\"\nrcParams['mathtext.fontset'] = 'custom' \nrcParams['lines.linewidth'] = 2\nrcParams['lines.markersize'] = 10\nrcParams['lines.markeredgewidth'] = 2 Nonlinear diffusion equations: some illustrative examples\n\nAs discussed in Section 1.1 in the paper, the foundations of the model lie on nonlinear diffusion processes, which we briefly illustrate with an example. The heart of the model is the Allen-Cahn equation, a well-known equation in the field of pattern formation. Just to show how the code we have can be used in that case, we will plot the evolution of an initial boundary value problem, with Neumann boundary conditions.\n\nLet's see first take a look at the evolution of $$u_0(x) = \\frac{1- \\sin(\\pi(2x - 1))}{2}$$\n\nas an initial condition to the Allen-Cahn equation \n\n$$\\partial_tu(x, t) = \\varepsilon \\partial_x^2u(x, t) + u(x, t)(1 − u(x, t))(u(x, t) − \\alpha(x)).$$\n\nThe parameter $\\alpha(\\cdot)$ embodies medium heterogeneity. In this case, we choose $\\alpha(x) = -2$, when $x <0.5$, and $\\alpha(x)$ = 2, when $x \\geq 0.5$.\n\nParameters to the model are given below:N = 20\nx = np.linspace(0, 1, N, endpoint = True)\nV_0 = 1/2 - 1/2 * np.reshape(np.sin(np.pi * (2 * x - 1)) , (-1,1))\nprop = Propagate()\ndt, eps, Nx, Nt = 0.1, .3, N, 400\ndx, ptt_cardnlty, weigths_k_sharing = x[1]-x[0], Nx, NtThen we initialize parametersinit = Initialize_parameters()\nparam = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weigths_k_sharing)If you read the paper you remember that trainable weights are the coefficients of this PDE. Since the model randomly initialize these coefficients, we will have to readjust them to the value we want. That's what we do in the next part of the code.for i in range(param[\"Nt\"]): param[\"alpha_x_t\"][:,i] = -2 * (x < .5) + 2 * (x >= .5)which we now run, using the numerical scheme (1.7a) in the paper. 
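For readers without the paper at hand, an explicit scheme of this general type (forward Euler in time, centered differences in space) reads

$$U^{[n+1]}_i \;=\; U^{[n]}_i + \Delta_t\left[\,\varepsilon\,\frac{U^{[n]}_{i+1} - 2U^{[n]}_i + U^{[n]}_{i-1}}{\Delta_x^2} \;+\; U^{[n]}_i\left(1 - U^{[n]}_i\right)\left(U^{[n]}_i - \alpha_i^{[n]}\right)\right],$$

with ghost values $U^{[n]}_{-1} = U^{[n]}_{1}$ and $U^{[n]}_{N_x} = U^{[n]}_{N_x-2}$ enforcing the Neumann boundary conditions. This is only an illustrative sketch; the exact form of scheme (1.7a), as implemented by prop.forward, is the one given in the paper.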
As poijnted out there, this is the same as doing a forward propagation: that's why you see the method \"prop.forward\" in the code below.flow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True)\ntime = np.arange(Nt + 1)\nX, Y = np.meshgrid(x, time)\nflow = np.squeeze(flow, axis = 1)\nfig = plt.figure(figsize = (15,8))\nax = fig.add_subplot(111, projection = '3d')\ncolor = plt.cm.viridis(np.arange(N)) \nsurf = ax.plot_wireframe(X, Y, flow.T, rstride = 10, cstride = 1,\\\n alpha = None, antialiased = True, linewidth = 3)\nax.view_init(60, -40)\nplt.draw()\nsurf.set_edgecolors(color)\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\n\nax.tick_params(which = 'both', labelsize = 16)\nax.set_xlabel('x', size = 22, labelpad = 30)\nax.set_ylabel('t', size = 22, labelpad = 30)\nax.set_zlabel('u', size = 22, labelpad = 10)\nax.set_zlim([0,1])\nplt.show()### Propagation with randomly generated coefficientsBut let's go back: the PSBC initialize these coeficients in a randomized fashion. Let's see more or less what it looks likeN = 1\ninit = Initialize_parameters()\nprop = Propagate()\ndt_vec = np.array([.1,.3,.57,1.5,3,4])\ndt, eps, Nx, Nt, dx = .1, 0, N, 20, 1\nptt_cardnlty, weights_k_sharing = Nx, Nt\nparam = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing)\n\nfor i in range(param[\"Nt\"]): param[\"alpha_x_t\"][:,i] = np.random.uniform(0,1)\n\nn_points = 10 \nV_0 = np.reshape(1/n_points * np.arange(0, n_points + 1), (1, -1))\nflow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True)\n# Setting a random seed for reproduction of graph, set np.random.seed(123)\nnp.random.seed(8123)\nA = np.reshape(np.random.uniform(0,1, size = Nt), (1,-1))\nx_ticks = np.arange(0,21,2)\nn_points = 10 \nV_0 = np.reshape(1/n_points*np.arange(0,n_points+1), (1,-1))\n\nfig, ax = plt.subplots(\n ncols = 1, nrows = 6, figsize = (12,16), sharex = True, gridspec_kw = {'wspace':20}\n)\n\nm, M = 0,1\n\nfor i in range(6):\n dt = dt_vec[i]\n param = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing)\n param[\"alpha_x_t\"] = A\n\n flow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True)\n \n m, M = min(m, np.min(flow)), max(M, np.max(flow))\n \n if i <3: ax[i].set_title(r\"$\\Delta_{\\mathrm{t}}^{\\mathrm{u}}$ = \" + str(dt))\n else: ax[i].set_title(r\"$\\Delta_{\\mathrm{t}}^{\\mathrm{*}}$ = \" + str(dt))\n \n ax[i].set_xlim([-.2,20.2])\n ax[i].set_ylabel(r\"$U^{[n]}(X;\\alpha^{[n-1]})$\")\n ax[i].set_xticks(x_ticks)\n \n if i == 5: ax[i].set_xlabel(r\"$n$ (layer number)\")\n\n ax[i].scatter(np.arange(0,param[\"alpha_x_t\"].shape[1]),param[\"alpha_x_t\"].T, marker='o',s = 30)\n ax[i].plot(flow[0,:].T)\n ax[i].grid(True, axis = 'x')\n \n## Adjust height\nfor i in range(6): ax[i].set_ylim([m,M])\n\nplt.show() Applying the PSBC model to some toy problems\n\nAs we did in the paper, we shall present the model in a simple toy problem, for illustrative purposes. 
We shall apply the PSBC later on to the MNIST dataset, where it has been trained.\n\n### The 1D Rectangular box problem\n\nWe shall work with a simple 1D model (the rectangular box problem), as the one used in the paper.folder = \"Statistics/MNIST/\"\nwith open(folder + \"parameters_MNIST_Neumann.p\", 'rb') as fp: data = pickle.load(fp)\n### GENERATE DATA\ngamma, N_data = .2, 2000\nX = np.reshape(np.random.uniform(0, 1, N_data),(1, -1))\nY = np.array(X >= gamma, np.int, ndmin = 2)\n\n### SPLIT DATA FOR CROSS VALIDATION\nA, B, C, D = train_test_split(X.T, Y.T, test_size = 0.2)\n#### We shall save one individual per column. We need to change that upon reading the csv later on\nX_train, X_test, Y_train, Y_test = A.T, B.T, C.T, D.TIn this model, the data has to satisfy features dimension X number of elements in the samplenp.shape(X_train)Now let's define the parameterslearning_rate = (.1,.08,.93)\npatience = float(\"inf\")\nsigma = .1\ndrop_SGD = 0.95 # See docstring of class \"Binary_phase_separation\" for further information\nepochs, dt, dx, eps, Nx, Nt = 600, .1, 1, 0, 1, 20\nweights_k_sharing = Nt\nptt_cardnlty = 1\nbatch_size = None\nsubordinate, save_parameter_hist, orthodox_dt, with_phase = True, True, True, Trueand initialize the modelInit = Initialize_parameters() \n\ndata = Init.dictionary(Nx, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing, sigma = sigma )\ndata.update({'learning_rate' : learning_rate, 'epochs' : epochs,\\\n 'subordinate' : subordinate,\"patience\" : patience,\\\n 'drop_SGD' : drop_SGD,\"orthodox_dt\" : orthodox_dt,'with_phase' : with_phase,\n \"batch_size\" : batch_size, \"save_parameter_hist\" : save_parameter_hist })We are finally ready to train the model. We do so using the class Binary_Phase_SeparationModel = Binary_Phase_Separation()Of which you can learn more about by typing print(Model.__doc__) \n This is the main class of the Phase Separation Binary Classifier (PSBC).\n With its methods one can, aong other things, train the model and \n predict classifications (once the model has been trained).\n \nIf the above is not enough you can do this:print(help(Model))Help on Binary_Phase_Separation in module binary_phase_separation object:\n\nclass Binary_Phase_Separation(builtins.object)\n | Binary_Phase_Separation(cost=None, par_U_model=None, par_P_model=None, par_U_wrt_epochs=None, par_P_wrt_epochs=None)\n | \n | This is the main class of the Phase Separation Binary Classifier (PSBC).\n | With its methods one can, aong other things, train the model and \n | predict classifications (once the model has been trained).\n | \n | Methods defined here:\n | \n | __init__(self, cost=None, par_U_model=None, par_P_model=None, par_U_wrt_epochs=None, par_P_wrt_epochs=None)\n | Class initializer. \n | \n | Parameters\n | ----------\n | cost : {bool, True}, optional\n | par_U_model : {dictionary, None}, optional\n | Dictionary containing initialization parameters for the U component\n | of the PSBC.\n | par_P_model : {dictionary, None}, optional\n | Dictionary containing initialization parameters for the P com[...]But this is maybe too much. So, let's say that you just want to know about how to train. You can get information only about that methodprint(Model.train.__doc__)\n 'train' method.\n\n This method trains the PSBC model with a given set of parameters and \n data.\n \n Parameters\n ----------\n X : numpy.ndarray of size Nx X N_data\n Matrix with features. \n Y : numpy.ndarray of size 1 X N_data\n Matrix with labels. 
\n X_test : numpy.ndarray of size Nx X N_data_test\n Matrix with features. \n Y_test : numpy.ndarray of size 1 X N_data_test\n Matrix with labels. \n learning_rate : float or tuple\n If Tuple with three elements (a,b,c), \n these numbers parametrize the learning rate decay.\n dt : float\n Mesh grid size of time discretization \n dx : float\n Mesh grid size of spatial discretization. \n layers : int\n Number o f layers. \n weights_K_sharing : int\n Number of successive layers that are sharing their weights.\n eps : {float, 0}, optional\n [...]The method that we want is train. So, we do Model.train(\n X_train, Y_train, X_train, Y_train, learning_rate, dt, dx, Nt,\\\n weights_k_sharing, eps = eps, epochs = epochs, \\\n subordinate = subordinate, with_phase = with_phase,\\\n drop_SGD = drop_SGD, sigma = sigma,\\\n orthodox_dt = orthodox_dt, print_every = 300,\\\n save_parameter_hist = save_parameter_hist\n)\n epoch : 0 cost 0.11494985702898435\n\n accuracy : 0.70375\n\n epoch : 300 cost 0.022553932287346947\n\n accuracy : 0.9775\nIf you want to take a look at how the cost function behaves over epochs, you can plot it as\ncost_over_epochs = Model.cost\n\nx = np.arange(len(cost_over_epochs))\nf, ax = plt.subplots(figsize = (15,5))\nax.plot(x, cost_over_epochs, lw = 3)\n\nax.set_title(\"Cost over epochs\")\nax.set_ylabel(\"Cost\")\nax.set_xlabel(\"Epochs\")\nax.grid(True)\nplt.show()And if you want to take a look at the behavior of the set $\\mathscr{P}_{\\alpha}$ you can also do. Just typediameter_history = Model.diameters_histwhich will give you a dictionary with two keys: \"U\" and \"P\"diameter_history.keys()They concern the behavior of trainable weights for the U variable, and for the P variable. They can be plotted as fig, ax = plt.subplots( nrows = 2, figsize = (15,10))\n\ncolors = pl.cm.tab20(np.linspace(0,1,11))\n\nax[0].plot(\n diameter_history[\"U\"], linestyle = '-', lw = 3,\\\n label = None, color = colors[0] \n)\nax[1].plot(\n diameter_history[\"P\"], linestyle = (0,(3,1,1,1,1,1)), lw = 3,\\\n label = None, color = colors[1]\n)\n\nfig.suptitle(\"Maximum of trainable weights evolution\")\nax[0].legend(loc = 4, fontsize = 16, ncol = 3)\nax[0].set_ylabel(r'Diameter$\\left(\\mathscr{P}_{\\alpha}^{[\\mathrm{N_t}-1]}\\right)$')\nax[0].set_xlabel('Number of iterations')\nax[0].grid(True)\nax[1].set_ylabel(r'Diameter$\\left(\\mathscr{P}_{\\beta}^{[\\mathrm{N_t}-1]}\\right)$')\nax[1].set_xlabel('Number of iterations')\nax[1].grid(True)\nplt.show()No handles with labels found to put in legend.\nSubstituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nThis is the typical behavior of these quantities. Note that they remain constant (equal to 1) up to a certain point, and then they grow in a logarithmic shape. Note that the point of departure from the value 1 is different for both variables. That's because they have a separate dynamics, and are allowed to vary independently.\n\n\nLast, since we are saving parameters (setting the variable \"save_parameter_hist = True\") we can see the behvarior of the accuracy throughout epochs: we are saving all the parameters in the model at each epoch. 
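As an aside, the patience argument accepted by train (left at float("inf") above) refers to an early-stopping rule of this general kind. The sketch below is a generic version computed from an accuracy history such as Model.accuracies_hist; the PSBC's exact stopping criterion may differ in its details.

```python
def best_epoch_with_patience(accs, patience=20):
    """Generic sketch: return (best_epoch, best_accuracy) observed before training
    would stop, i.e. after `patience` consecutive epochs without improvement."""
    best_acc, best_ep, wait = -float("inf"), 0, 0
    for ep, acc in enumerate(accs):
        if acc > best_acc:
            best_acc, best_ep, wait = acc, ep, 0
        else:
            wait += 1
            if wait >= patience:
                break
    return best_ep, best_acc

# e.g. best_epoch_with_patience(Model.accuracies_hist, patience=50)
```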
We wemark that is applications it is better to set \"save_parameter_hist = False\" in order to save memory accuracies_fnt = Model.accuracies_hist\nx = np.arange(len(accuracies_fnt))\n\n## Plotting\nf, ax = plt.subplots(figsize = (15,5))\nax.plot(x, accuracies_fnt, lw = 3)\nf.suptitle(\"Accuracy over epochs\")\nax.set_ylabel(\"Accuracy\")\nax.set_xlabel(\"Epochs\")\nax.grid(True)\nplt.show()Note the the model peaks (reaches a point of high accuracy) before the final epoch. This natural \"deterioration\" is what lead researchers to design Early Stopping techniques. We can in fact know what that epoch was by typingModel.best_epochWhose value wasModel.best_accuracythat is, 100\\% accuracy.If you want to retrieve the model parameters at such an epoch you just need to typebest_P , best_U = Model.best_par_P_model, Model.best_par_U_modelwhich will give the value of the parameters used when the model achieved its best performance.number_tests = Model.par_U_model[\"epochs\"]\n\naccuracy_train, accuracy_test = [] , []\nfor j in range(number_tests - 1):\n\n _, aux_train, accuracy_train_now =\\\n Model.predict_and_accuracy(\n X_train,Y_train, Model.par_U_wrt_epochs[str(j)], Model.par_P_wrt_epochs[str(j)],\\\n subordinate = subordinate,with_phase = with_phase)\n _, aux_test, accuracy_test_now =\\\n Model.predict_and_accuracy(\n X_test, Y_test, Model.par_U_wrt_epochs[str(j)], Model.par_P_wrt_epochs[str(j)],\\\n subordinate = subordinate, with_phase = with_phase)\n\n # Accuracies\n accuracy_train.append(accuracy_train_now)\n accuracy_test.append(accuracy_test_now)\nx = np.arange(len(accuracy_train))\n\n## Plotting\nf, ax = plt.subplots(figsize = (15,8))\nplt.plot(x, accuracy_train, lw = 3, label = \"Train\")\nplt.plot(x, accuracy_test, lw = 3, label = \"Test\")\nf.suptitle(\"Accuracy over epochs\")\nax.set_ylabel(\"Accuracy\")\nax.set_xlabel(\"Epochs\")\nax.legend(loc = 4)\nax.grid(True)\nplt.show() The MNIST dataset #########################################################################\n### READ MNIST DATASET TO PANDAS DATAFRAME AND THEN TO CSV FILE\n#########################################################################\ndata_train_MNIST = pd.read_csv('Examples/data_train_normalized_MNIST.csv')\ndata_test_MNIST = pd.read_csv('Examples/data_test_normalized_MNIST.csv')\nX_train_MNIST = (data_train_MNIST.iloc[:,:-1]).to_numpy()\nY_train_MNIST = np.reshape(data_train_MNIST.iloc[:,-1].to_numpy(), (1,-1))\nX_test_MNIST = (data_test_MNIST.iloc[:,:-1]).to_numpy()\nY_test_MNIST = np.reshape(data_test_MNIST.iloc[:,-1].to_numpy(), (1,-1))\n\nX_train_MNIST, X_test_MNIST = X_train_MNIST.T , X_test_MNIST.T\nprint(X_test_MNIST.shape, Y_test_MNIST.shape)(784, 2956) (1, 2956)\nSee https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler(X_train_MNIST.min(axis = 0)[:5], X_train_MNIST.max(axis = 0)[:5]),\\\n(X_test_MNIST.min(axis = 0)[:5], X_test_MNIST.max(axis = 0)[:5])\nprint(\n \"Train, 0:\\t\",len(np.squeeze(np.where(Y_train_MNIST == 0))[1,:])/Y_train_MNIST.shape[1],\\\n \"\\nTrain, 1: \\t\",len(np.squeeze(np.where(Y_train_MNIST == 1))[1,:])/Y_train_MNIST.shape[1]\n)\n\nprint(\n \"Test, 0:\\t\",len(np.squeeze(np.where(Y_test_MNIST == 0))[1,:])/Y_test_MNIST.shape[1],\\\n \"\\nTest, 1: \\t\",len(np.squeeze(np.where(Y_test_MNIST == 1))[1,:])/Y_test_MNIST.shape[1]\n)\nwhere_0 = np.squeeze(np.where(Y_train_MNIST == 0))[1,:10]\nwhere_1 = np.squeeze(np.where(Y_train_MNIST == 1))[1,:10]\n\nplt.figure(figsize = (15,10))\n\npick = 
np.asarray(where_0)\nzero_images = np.array([], dtype = np.int64).reshape(28,0)\nimages = [np.reshape(X_train_MNIST[:,pick[image_index]], (28,28)) for image_index in range(10) ]\nfor image in images:\n zero_images = np.concatenate([zero_images,image], axis = 1)\n\npick = np.asarray(where_1)\none_images = np.array([], dtype = np.int64).reshape(28,0)\nimages = [np.reshape(X_train_MNIST[:,pick[image_index]], (28,28)) for image_index in range(10) ]\nfor image in images:\n one_images = np.concatenate([one_images,image], axis = 1)\n\nboth = np.concatenate([zero_images,one_images], axis = 0) \nplt.axis(\"off\")\n\nplt.imshow(both, cmap = \"binary\")\nplt.show()One can also visualize the trainable weights as heatmaps.parent_folder = \"Examples/\"\n\n## Non-subordinate ########################################################\nsub_non_sub = {}\nfolder_now = parent_folder + \"W1S-NS/simulation1/\"\n\nwith open(folder_now + \"Full_model_properties.p\", 'rb') as fp: Full_model_properties = pickle.load(fp)\n\nsub_non_sub[\"non\" + \"best_par_P\"] = Full_model_properties[\"best_par_P_model\"]\nsub_non_sub[\"non\" + \"best_par_U\"] = Full_model_properties[\"best_par_U_model\"]\n\n## Subordinate ############################################################\nparent_folder = \"Examples/\"\nfolder_now = parent_folder + \"W1S-S/simulation1/\"\n\nwith open(folder_now + \"Full_model_properties.p\", 'rb') as fp: Full_model_properties = pickle.load(fp)\n\nsub_non_sub[\"sub\" + \"best_par_P\"] = Full_model_properties[\"best_par_P_model\"]\nsub_non_sub[\"sub\" + \"best_par_U\"] = Full_model_properties[\"best_par_U_model\"]\n\n## W1S-Nt8 ##################################################################\nparent_folder = \"Examples/\"\nfolder_now = parent_folder + \"W1S-Nt8/simulation1/\"\n\nwith open(folder_now + \"Full_model_properties.p\", 'rb') as fp: Full_model_properties = pickle.load(fp)\n\nsub_non_sub[\"kfold\" + \"best_par_P\"] = Full_model_properties[\"best_par_P_model\"]\nsub_non_sub[\"kfold\" + \"best_par_U\"] = Full_model_properties[\"best_par_U_model\"]\nimport seaborn as sns\n%matplotlib inline\nf, ax = plt.subplots(ncols = 3, nrows = 2, figsize = (15,10), constrained_layout = False)\n\nlist_sub = (\"non\", \"sub\", \"kfold\")\nlist_sub_big = (\"Non-subordinate\\n(Weights-1-sharing)\",\\\n \"Subordinate\\n(Weights-1-sharing)\",\\\n \"Subordinate,\"+ r\"$\\mathrm{N_t}=8$\"+\"\\n\"+r\"(Weights-$1$-sharing)\")\n\nm_1 = np.min([sub_non_sub[list_sub[i] + \"best_par_P\"][\"alpha_x_t\"].min() for i in range(3)])\nM_1 = np.max([sub_non_sub[list_sub[i] + \"best_par_P\"][\"alpha_x_t\"].max() for i in range(3)])\n\nfor i in range(3):\n Nx = sub_non_sub[list_sub[i] + \"best_par_P\"][\"alpha_x_t\"].shape[0]\n sub_non_sub[str(i) + \"matrix\"] = sub_non_sub[list_sub[i] + \"best_par_P\"][\"alpha_x_t\"]\n sns.heatmap(\\\n sub_non_sub[str(i) + \"matrix\"], ax = ax[0,i], vmin = m_1, vmax = M_1, cbar = False,\\\n cmap = 'inferno'\n )\n pcm = ax[0,i].pcolormesh(sub_non_sub[str(i) + \"matrix\"])\n ax[0,i].set_title(list_sub_big[i], size = 20)\n\nf.subplots_adjust(right=0.9)\ncbar_ax = f.add_axes([.92, .55, .03, .3])\nf.colorbar(pcm, cax = cbar_ax)\n \nax[0,0].set_ylabel(r\"$W_P^{[\\cdot]} = \\beta^{[\\cdot]}$\",rotation=90, size = 18)\n\n\nm_2 = np.min([sub_non_sub[list_sub[i] + \"best_par_U\"][\"alpha_x_t\"].min() for i in range(3)])\nM_2 = np.max([sub_non_sub[list_sub[i] + \"best_par_U\"][\"alpha_x_t\"].max() for i in range(3)])\n\nfor i in range(3):\n Nx = sub_non_sub[list_sub[i] + \"best_par_U\"][\"alpha_x_t\"].shape[0]\n 
M = sub_non_sub[list_sub[i] + \"best_par_U\"][\"alpha_x_t\"]\n sub_non_sub[str(i) + \"matrix\"] = M\n \n sns.heatmap(M, ax = ax[1,i], vmin = m_2, vmax = M_2, cbar = False, cmap = 'inferno')\n pcM = ax[1,i].pcolormesh(sub_non_sub[str(i) + \"matrix\"])\n \ncbar_ax2 = f.add_axes([.92, .15, .03, .3])\nf.colorbar(pcM, cax = cbar_ax2)\nax[1,0].set_ylabel(r\"$W_U^{[\\cdot]} = \\alpha^{[\\cdot]}$\", rotation = 90, size = 18)\nplt.show() Retrieving some statistics parameters_MNIST_nondif, stats_folder_MNIST = {}, \"Statistics/MNIST/\"\n\nwith open(stats_folder_MNIST + \"parameters_MNIST_nondif.p\", 'rb') as fp: \n parameters_MNIST_nondif = pickle.load(fp)\n\nparameters_MNIST_Neumann, stats_folder_MNIST = {}, \"Statistics/MNIST/\"\n\nwith open(stats_folder_MNIST + \"parameters_MNIST_Neumann.p\", 'rb') as fp:\n parameters_MNIST_Neumann = pickle.load(fp)\n\nparameters_MNIST_Periodic, stats_folder_MNIST = {}, \"Statistics/MNIST/\"\n\nwith open(stats_folder_MNIST + \"parameters_MNIST_Periodic.p\", 'rb') as fp:\n parameters_MNIST_Periodic = pickle.load(fp)\nhelp(accuracies)Help on function accuracies in module aux_fnts_for_jupyter_notebooks:\n\naccuracies(parameters, name, accuracy_type, number_folders=10, number_simulations=10)\n This function is only used in the jupyter notebook for the MNIST dataset\n \n \n Parameters\n ----------\n parameters : dictionary\n Dictionary containing summary of data for some PSBC experiments.\n name : string\n Name of the keys of the dictionary \"parameters\" that we are studying, corresponding to a PSBC configuration.\n accuracy_type : string\n Either \"best_accuracy_train\" or \"best_accuracy_test\".\n number_folders : {int, 10}, optional\n Number of folders, where each folder corresponds of one value of the parameter being valued.\n number_simulations : {int, 10}, optional\n Number of simulations that were run with the same parameter, for statistical purposes.\n \n Returns\n -------\n A : matrix\n Matrix with all the accuracies of type accuracy_type, \n wher[...]For more details about the code in the next plot, see Remark 1 (cell 20) in the jupyter-notebook **Notebook_PSBC_MNIST.ipynb**.A_train_1NS, value_of_parameter_varying = accuracies (\n parameters_MNIST_nondif, \"W1S-NS\", \"best_accuracy_train\")\nvalue_of_parameter_varying = value_of_parameter_varying[::-1]\naverage_train_1NS, stdev_train_1NS =\\\nnp.mean(A_train_1NS, axis = 1)[::-1], np.std(A_train_1NS, axis = 1)[::-1]\n\nA_train_1S, _ = accuracies (\n parameters_MNIST_nondif, \"W1S-S\", \"best_accuracy_train\")\naverage_train_1S, stdev_train_1S = np.mean(A_train_1S, axis = 1)[::-1], np.std(A_train_1S, axis = 1)[::-1]\n\nA_train_NS, _ = accuracies (\n parameters_MNIST_nondif, \"WNtS-NS\", \"best_accuracy_train\")\naverage_train_NS, stdev_train_NS = np.mean(A_train_NS, axis = 1)[::-1], np.std(A_train_NS, axis = 1)[::-1]\n\nA_train_S, _ = accuracies (\n parameters_MNIST_nondif, \"WNtS-S\", \"best_accuracy_train\")\naverage_train_S, stdev_train_S = np.mean(A_train_S, axis = 1)[::-1], np.std(A_train_S, axis = 1)[::-1]\n\ncolors = pl.cm.tab10(np.linspace(0,1,9))\ncolors_markers = pl.cm.tab10(np.linspace(0,1,9))\n\n# Parameters using during pltting\ncol_NS, mk_NS, alpha_for_all, L, T = 0, 1, 0.6, -3, .008\nvalue_of_parameter_varying = np.asarray(value_of_parameter_varying)\n\nfig,ax = plt.subplots(\n nrows = 2, ncols = 2, figsize = (15,12), sharey='row', sharex = 'col',\\\n gridspec_kw = {'wspace':0,'hspace':0}, constrained_layout = False\n)\n\nmarkers, caps, bars = ax[0,0].errorbar(\n (1 + 
T)*value_of_parameter_varying[:L], average_train_1NS[:L], marker = 'o',\\\n yerr = stdev_train_1NS[:L], errorevery = 1, linestyle = '-',\\\n label = \"Non Sub\", color = colors[col_NS+3],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[0,0].errorbar(\n (1/(1 + T))*value_of_parameter_varying[:L], average_train_1S[:L], marker = 'x',\\\n yerr = stdev_train_1S[:L], errorevery = 1, linestyle = '-',\\\n label = \"Sub\", color = colors[col_NS],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] \n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[0,1].errorbar(\n (1 + T)*value_of_parameter_varying[:L], average_train_NS[:L], marker = 'o',\\\n yerr = stdev_train_NS[:L], errorevery = 1, linestyle = '-',\\\n label = \"Non Sub\", color = colors[col_NS+3],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[0,1].errorbar(\n (1/(1 + T))*value_of_parameter_varying[:L], average_train_S[:L], marker = 'x',\\\n yerr = stdev_train_S[:L], errorevery = 1, linestyle = '-',\\\n label = \"Sub\", color = colors[col_NS],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] \n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nA_test_1NS, _ = accuracies (parameters_MNIST_nondif, \"W1S-NS\", \"best_accuracy_test\")\naverage_test_1NS, stdev_test_1NS = np.mean(A_test_1NS, axis = 1)[::-1], np.std(A_test_1NS, axis = 1)[::-1]\n\nA_test_1S, _ = accuracies (parameters_MNIST_nondif, \"W1S-S\", \"best_accuracy_test\")\naverage_test_1S, stdev_test_1S = np.mean(A_test_1S, axis = 1)[::-1], np.std(A_test_1S, axis = 1)[::-1]\n\nA_test_NS, _ = accuracies (parameters_MNIST_nondif, \"WNtS-NS\", \"best_accuracy_test\")\naverage_test_NS, stdev_test_NS = np.mean(A_test_NS, axis = 1)[::-1], np.std(A_test_NS, axis = 1)[::-1]\n\nA_test_S, _ = accuracies (parameters_MNIST_nondif, \"WNtS-S\", \"best_accuracy_test\")\naverage_test_S, stdev_test_S = np.mean(A_test_S, axis = 1)[::-1], np.std(A_test_S, axis = 1)[::-1]\n\nmarkers, caps, bars = ax[1,0].errorbar(\n (1 + T)*value_of_parameter_varying[:L], average_test_1NS[:L], marker = 'o', \\\n yerr = stdev_test_1NS[:L], errorevery = 1, linestyle = '-',\\\n label = \"Non Sub\", color = colors[col_NS+3],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[1,0].errorbar(\n (1/(1 + T))*value_of_parameter_varying[:L], average_test_1S[:L], marker = 'x',\\\n yerr = stdev_test_1S[:L], errorevery = 1, linestyle = '-',\\\n label = \"Sub\", color = colors[col_NS],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[1,1].errorbar(\n (1 + T)*value_of_parameter_varying[:L], average_test_NS[:L], marker = 'o', \\\n yerr = stdev_test_NS[:L], errorevery = 1, linestyle = '-',\\\n label = \"Non-sub\", color = colors[col_NS+3],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nmarkers, caps, bars = ax[1,1].errorbar(\n (1/(1 + T))*value_of_parameter_varying[:L], average_test_S[:L], marker = 'x',\\\n yerr = stdev_test_S[:L], errorevery = 1, linestyle = '-',\\\n label = \"Sub\", color = colors[col_NS],\\\n fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2]\n)\n[bar.set_alpha(alpha_for_all) for bar in bars]\n\nfor i in 
[0,1]:\n for j in [0,1]:\n ax[i,j].grid(axis = \"y\")\n ax[i,j].set_xscale('log') \n ax[i,j].set_xticks(np.asarray(value_of_parameter_varying[:L]))\n ax[i,j].tick_params(\"x\")\n ax[i,j].get_xaxis().set_major_formatter(ScalarFormatter())\n ax[i,j].xaxis.set_minor_formatter(plt.matplotlib.ticker.NullFormatter())\n\nax[0,0].set_title(r\"Weights-$1$-sharing\", size = 18) \nax[0,1].set_title(r\"Weights-$\\mathrm{N_t}$-sharing\", size = 18) \nax[0,0].set_ylabel(\"Accuracy train (average)\") \nax[1,0].set_ylabel(\"Accuracy test (average)\")\n\nax[1,0].set_xlabel(r\"$\\mathrm{N_{ptt}}$ (in logarithmic scale)\")\nax[1,1].set_xlabel(r\"$\\mathrm{N_{ptt}}$ (in logarithmic scale)\")\n\nchartBox = ax[1,1].get_position()\nax[1,1].set_position([chartBox.x0, chartBox.y0, chartBox.width*0.2, chartBox.height])\nax[1,1].legend(loc = 'upper center', bbox_to_anchor = (0, 0.15), shadow = True, ncol = 4)\n\nfig.set_tight_layout({'rect': [0, 0, 1, 0.95]})\nfig.suptitle(\"Non-diffusive PSBC\", size = 20)\nplt.show()Or in case you want to see the evolution of the maximum over epochs, for Periodic PSBC with Nt =1fig, ax = plt.subplots(nrows = 2, figsize = (15,10))\n\n_, value_of_parameter_varying = accuracies (\n parameters_MNIST_Periodic, \"Per_W1S-Nt2\", \"best_accuracy_train\", number_folders = 13)\n\ncolors = pl.cm.tab20(np.linspace(0,1,16))\n\ndef test_label_value_of_parameter_varying(x):\n if x == 0:\n return \"0\"\n j = int(np.ceil(np.log2(x))) \n return r'$2^{{{0}}}$'.format(j)\n\nparam = parameters_MNIST_Periodic[\"Per_W1S-Nt8\"]\nfor i in range(1, 14):\n param_now = param[str(i)]\n diam_hist_now = param_now['diam_hist']\n ax[0].plot(diam_hist_now[\"U\"], linestyle = '-', lw = 3,\\\n label = str(test_label_value_of_parameter_varying(value_of_parameter_varying[i-1])),\\\n color=colors[i] )\n ax[1].plot(diam_hist_now[\"P\"], linestyle = (0,(3,1,1,1,1,1)), lw = 3,\\\n label = str(test_label_value_of_parameter_varying(value_of_parameter_varying[i-1])),\\\n color = colors[i])\n\nax[0].legend(loc = 2, fontsize = 16, ncol = 3)\nax[0].set_ylabel(r'Diameter$\\left(\\mathscr{P}_{\\alpha}^{[\\mathrm{N_t}-1]}\\right)$')\nax[0].set_xlabel('Number of iterations')\nax[0].grid(True)\n\nax[0].legend(loc = 2, ncol = 3, title = r\"$\\varepsilon$\")\nplt.rcParams[\"legend.title_fontsize\"] = 20\nplt.rcParams[\"legend.columnspacing\"] = .8\n\nax[1].set_ylabel(r'Diameter$\\left(\\mathscr{P}_{\\beta}^{[\\mathrm{N_t}-1]}\\right)$')\nax[1].set_xlabel('Number of iterations')\nax[1].grid(True)\nplt.show()Substituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nSubstituting with a symbol from Computer Modern.\nOr one could draw some confusion matrices. 
For example, the confusion matrix for a realization of the diffusive PSBC with Neumann BCs, weights-1-sharing, and Nt = 8 is given belowparent_folder = \"Examples/\"\nfolder_now = parent_folder + \"W1S-Nt8/simulation1/\"\nwith open(folder_now + \"Full_model_properties.p\", 'rb') as fp: Full_model_properties = pickle.load(fp)\nbest_predic_vector_test_now = np.squeeze(Full_model_properties[\"best_predic_vector_test\"])\nreal_tags = np.squeeze(Y_test_MNIST)\nfrom sklearn.metrics import confusion_matrix\nconf_matrix_example = confusion_matrix(real_tags, best_predic_vector_test_now )\nfig, axs = plt.subplots( nrows = 1,ncols = 1, figsize = (10, 10))\n\ncax_test = axs.matshow(conf_matrix_example, cmap = plt.cm.Blues)\naxs = sns.heatmap(conf_matrix_example/np.sum(conf_matrix_example),\\\n annot = True, annot_kws={\"size\": 20},\\\n fmt='.2%', cmap = 'Blues', ax = axs, cbar = False)\n\nplt.yticks(rotation = 0, fontsize = 18)\nplt.xticks(fontsize = 18)\naxs.xaxis.tick_top() # x axis on top\naxs.xaxis.set_label_position('top')\naxs.set_ylabel(\"True labels\", fontsize = 22)\naxs.set_xlabel(\"Predicted labels\", fontsize = 22)\nplt.show()### Predicting my own handwritten 0 and 1So, as we have seen from the first example, this is a predictive model (that we refer to as PSBC) based on a reaction-diffusion equation. In the second example we show the PSBC on a toy problem. As remarked extensively in the paper, we know that this is in fact quite a particular case of a 1D problem where it performs well, because this is not expected in general (see, in particular, Section 3.6 in the paper). \n\nTo highlight the interplay between high-dimensionality of feature spaces and model compressibility, we have applied the model to the subset \"0\"-\"1\" of the MNIST database. To illustrate a bit more of the model's use and also play with the trainable weights, we will now predict the label for our own handwritten numbers. One of the original pictures is given belowIn fact, I wrote 6 numbers - 3 zeros, 3 ones - for this notebook. If you read the first papers of LeCun et al. about the MNIST project, there is a description of the way pictures were taken, so that they look the way they do in cell 36 of this Notebook: the images had to be controlled for centralization, light contrast, etc. This is part of the statistical design, which I tried to follow as closely as possible. \n\nI cropped the pictures using [GIMP](https://www.gimp.org), free software for image manipulation: you take a picture, crop it, go to Image, set it to grayscale. Then you can adjust for light contrast and other things. \n\nAnd that's it. Now, with the cropped, grayscale jpg in hand, you proceed as in the next cell. 
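If you prefer to avoid the manual GIMP steps, roughly the same preparation (grayscale, contrast stretch, inversion, downsampling) can be scripted. The sketch below is only an approximation of that manual pipeline — it does not crop, so the digit should already fill most of the frame — and the file name is just the one used in the cells below.

```python
import numpy as np
from PIL import Image, ImageOps

def prepare_photo(path, size=(28, 28)):
    """Rough programmatic stand-in for the manual GIMP preparation described above."""
    img = Image.open(path).convert("L")   # grayscale
    img = ImageOps.autocontrast(img)      # stretch the light contrast
    img = ImageOps.invert(img)            # MNIST-style: bright stroke on dark background
    return np.array(img.resize(size), dtype=np.uint8)

# e.g. my_digit = prepare_photo("figures/my_0.jpg")
```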
I show two examples below.from PIL import Image\nim_array0 = np.asarray(Image.open(\"figures/my_0.jpg\"))\nim_array1 = np.asarray(Image.open(\"figures/my_1.jpg\"))\nfig, ax = plt.subplots(1,2)\nax[0].imshow(im_array0, cmap='binary')\nax[1].imshow(im_array1, cmap='binary')\nax[0].axis(False)\nax[1].axis(False)\nplt.show()Now we reshape this pictures as a 28 x 28 matrix.from PIL import Image\n\ndef create_MNIST_type_figure(name):\n \"\"\"Convert jpg figure to a (28,28) numpy array\"\"\"\n image = Image.open(name).convert('L')\n image2 = image.resize((28,28))\n im2_as_array = 255- np.array(image2, dtype=np.uint8)\n print(\"image has shape\", im2_as_array.shape)\n\n return im2_as_array\n\nmy_0 = create_MNIST_type_figure(\"figures/my_0.jpg\")\nmy_0_v2 = create_MNIST_type_figure(\"figures/my_0_v2.jpg\")\nmy_0_v3 = create_MNIST_type_figure(\"figures/my_0_v3.jpg\")\nmy_1 = create_MNIST_type_figure(\"figures/my_1.jpg\")\nmy_1_v2 = create_MNIST_type_figure(\"figures/my_1_v2.jpg\")\nmy_1_v3 = create_MNIST_type_figure(\"figures/my_1_v3.jpg\")\n\nfig, ax = plt.subplots(1,2)\nax[0].imshow(my_0, cmap='binary')\nax[1].imshow(my_1, cmap='binary')\nax[0].axis(False)\nax[1].axis(False)\nplt.show()image has shape (28, 28)\nimage has shape (28, 28)\nimage has shape (28, 28)\nimage has shape (28, 28)\nimage has shape (28, 28)\nimage has shape (28, 28)\nRecall that we need to flatten these matrices,my_0_for_psbc = my_0.flatten(order='C')\nmy_0_for_psbc_v2 = my_0_v2.flatten(order='C')\nmy_0_for_psbc_v3 = my_0_v3.flatten(order='C')\nmy_1_for_psbc = my_1.flatten(order='C')\nmy_1_for_psbc_v2 = my_1_v2.flatten(order='C')\nmy_1_for_psbc_v3 = my_1_v3.flatten(order='C')and we can then combine all of them as columns in a single matrix.combined_handwritten = \\\nnp.c_[my_0_for_psbc, my_0_for_psbc_v2, my_0_for_psbc_v3, my_1_for_psbc, my_1_for_psbc_v2, my_1_for_psbc_v3]Now we load a PSBC model with open(\"Examples/W1S-Nt8/simulation1/Full_model_properties.p\", 'rb') as fp: load_mnist = pickle.load(fp)\n\npsbc_testing = Binary_Phase_Separation()\nprediction = psbc_testing.predict(combined_handwritten, load_mnist[\"best_par_U_model\"],load_mnist[\"best_par_P_model\"])\nprint(prediction)[0 0 0 0 0 0]\nThis seems really bad... but don't be worried! You should not forget: we need to satisfy the normalization conditions!! In fact, we are very far from that:np.max(combined_handwritten), np.min(combined_handwritten), combined_handwritten.shapeSo, let's normalize the datainit_data = Initialize_Data()\nhelp(init_data)Help on Initialize_Data in module binary_phase_separation object:\n\nclass Initialize_Data(builtins.object)\n | This class preprocess the data, normalizing it.\n | \n | Methods defined here:\n | \n | __init__(self)\n | Class initializer. No returned value.\n | \n | denormalize(self, Z, min_vals, max_vals, sigma=0.2)\n | 'denormalize' method.\n | \n | This method puts the data back to its original scale.\n | Of the non-normalized data the method uses its minimum value\n | min_vals, its original maxum value max_vals, and sigma.\n | The non-normalized data is transformed by\n | \n | A = ( 1 / sigma ) * ( Z - .5 + sigma /2)\n | \n | and then Z_2 = min_vals + A * (max_vals - min_vals).\n | \n | Z_2 is the returned value.\n | \n | Parameters\n | ----------\n | \n | Returns\n | -------\n | Non-normalized data 'A'.\n | \n | A : numpy.ndarray\n | \n | normalize(self, Z, sigma=0.2)\n | 'normalize' method.[...]So, the data get's normalized, but centered. By default, it gets rescaled in the range [0.4,0.6]. 
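Inverting the denormalize formula quoted in the docstring above gives an explicit expression for the forward map (a sketch consistent with that docstring; $\sigma = 0.2$ is the default):

$$Z_{\mathrm{norm}} \;=\; \frac{1}{2} - \frac{\sigma}{2} \;+\; \sigma\,\frac{X - X_{\min}}{X_{\max} - X_{\min}} \;\in\; \Big[\frac{1}{2}-\frac{\sigma}{2},\;\frac{1}{2}+\frac{\sigma}{2}\Big] \;=\; [0.4,\,0.6],$$

so after the additional shift of 0.1 applied in the next cell the pixel values lie in $[0.5, 0.7]$.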
What we do then is: (i) we normalize it, then (ii) we add 0.1 to it.combined_handwritten_for_psbc, _, _ = init_data.normalize(combined_handwritten)\ncombined_handwritten_for_psbc = 0.1+ combined_handwritten_for_psbc\nnp.max(combined_handwritten_for_psbc), np.min(combined_handwritten_for_psbc)\ncombined_handwritten_for_psbc.shape\nfig, ax = plt.subplots(1,6,figsize = (15,5))\n\nfor i in range(6):\n ax[i].imshow(np.reshape(combined_handwritten_for_psbc[:,i],(28,28)), cmap = 'binary')\n ax[i].axis(\"off\")\nfor name in [\"W1S-NS\", \"W1S-S\", \"WNtS-NS\", \"WNtS-S\",\\\n \"W1S-Nt2\", \"W1S-Nt4\", \"W1S-Nt8\",\\\n \"WNtS-Nt1\",\"WNtS-Nt2\", \"WNtS-Nt4\", \"WNtS-Nt8\",\\\n \"Per_W1S-Nt2\", \"Per_W1S-Nt4\", \"Per_W1S-Nt8\",\\\n \"Per_WNtS-Nt1\",\"Per_WNtS-Nt2\", \"Per_WNtS-Nt4\", \"Per_WNtS-Nt8\"]:\n \n with open(\"Examples/\"+name+\"/simulation1/Full_model_properties.p\", 'rb') as fp:\n load_mnist = pickle.load(fp)\n \n psbc_testing = Binary_Phase_Separation()\n prediction = \\\n psbc_testing.predict(\n combined_handwritten_for_psbc, load_mnist[\"best_par_U_model\"], load_mnist[\"best_par_P_model\"],\\\n subordinate = load_mnist[\"best_par_U_model\"][\"subordinate\"]\n )\n print(\"Model\", name, \" predicts\", np.squeeze(prediction), \"and correct is, [0 0 0 1 1 1]\" )Model W1S-NS predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel W1S-S predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-NS predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-S predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel W1S-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel W1S-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel W1S-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-Nt1 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel WNtS-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel Per_W1S-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel Per_W1S-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel Per_W1S-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1]\nModel Per_WNtS-Nt1 predicts [0 0 0 [...]"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/PSBC_v1/Notebook_PSBC_examples.ipynb"},"repo_name":{"kind":"string","value":"rafael-a-monteiro-math/Binary_classification_phase_separation"},"chain_length":{"kind":"number","value":39,"string":"39"}}},{"rowIdx":4896,"cells":{"content":{"kind":"string","value":"# Single Layer Neural Network\n\n이번 시간에는 딥러닝 알고리즘의 가장 기본이 되는 인공신경망(artificial neural network, ANN), 그 중에서도 single-layer neural network 모델을 구현합니다. 
오늘은 크게 크게 세 가지 방식, 1) Random Search, 2) h-step Search, 3) Gradient Descent 로 모델을 학습하는 법을 배우며, 이 중에 어떤 것이 가장 좋고 어떤 것을 선택해야하는지를 배웁니다.\nimport numpy as np## Case 1 - 0.3 x X1 + 0.5 x X2### Load Datasetx1 = np.random.rand(100)\n\nprint(x1.shape)\nx1[:10]\nx2 = np.random.rand(100)\n\nprint(x2.shape)\nx2[:10]\ny = 0.3 * x1 + 0.5 * x2 + 0.1\n\nprint(y.shape)\ny[:10](100,)\n### First idea: Random Searchnum_epoch = 10000\n\nbest_error = np.inf\nbest_epoch = None\nbest_w1 = None\nbest_w2 = None\nbest_b = None\n\nfor epoch in range(num_epoch):\n w1 = np.random.uniform(low=-1.0, high=1.0)\n w2 = np.random.uniform(low=-1.0, high=1.0)\n b = np.random.uniform(low=-1.0, high=1.0)\n\n y_predict = x1 * w1 + x2 * w2 + b\n \n error = np.abs(y_predict - y).mean()\n \n if error < best_error:\n best_error = error\n best_epoch = epoch\n best_w1 = w1\n best_w2 = w2\n best_b = b\n\n print(\"{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}\".format(epoch, w1, w2, b, error))\n\nprint(\"----\" * 15)\nprint(\"{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}\".format(best_epoch, best_w1, best_w2, best_b, best_error)) 0 w1 = 0.16720, w2 = 0.10272, b = -0.72859, error = 1.09869\n 1 w1 = 0.82804, w2 = 0.53150, b = 0.84809, error = 1.03204\n 2 w1 = 0.80962, w2 = 0.07287, b = -0.77298, error = 0.83242\n 4 w1 = -0.18429, w2 = 0.12042, b = 0.37839, error = 0.19042\n 7 w1 = 0.05444, w2 = 0.00767, b = 0.40372, error = 0.14693\n 21 w1 = -0.14689, w2 = 0.21581, b = 0.45108, error = 0.13260\n 60 w1 = 0.03061, w2 = 0.57022, b = 0.07129, error = 0.13072\n 96 w1 = -0.06154, w2 = 0.79950, b = 0.06346, error = 0.12450\n 216 w1 = 0.65567, w2 = 0.26152, b = 0.12255, error = 0.12167\n 264 w1 = 0.12405, w2 = 0.74461, b = 0.15717, error = 0.10481\n 409 w1 = 0.58461, w2 = 0.44052, b = -0.07694, error = 0.08539\n 785 w1 = 0.55593, w2 = 0.31329, b = 0.09222, error = 0.07776\n 806 w1 = 0.28493, w2 = 0.56734, b = 0.13940, error = 0.06612\n1001 w1 = 0.20557, w2 = 0.39686, b = 0.16333, error = 0.04413\n1464 w1 = 0.43189, w2 = 0.49839, b = 0.01952, error = 0.03420\n5299 w1 = 0.28513, w2 = 0.58166, b = 0.06983, error = 0.02158\n85[...]### Case 2 - h-step Searchnum_epoch = 15000\n\nw1 = np.random.uniform(low=-1.0, high=1.0)\nw2 = np.random.uniform(low=-1.0, high=1.0)\nb = np.random.uniform(low=-1.0, high=1.0)\n\nh = 0.01\n\nfor epoch in range(num_epoch):\n y_predict = x1 * w1 + x2 * w2 + b\n current_error = np.abs(y_predict - y).mean()\n\n if current_error < 0.005:\n break\n\n y_predict = x1 * (w1 + h) + x2 * w2 + b\n h_plus_error = np.abs(y_predict - y).mean()\n if h_plus_error < current_error:\n w1 = w1 + h\n else:\n y_predict = x1 * (w1 - h) + x2 * w2 + b\n h_minus_error = np.abs(y_predict - y).mean()\n if h_minus_error < current_error:\n w1 = w1 - h\n \n y_predict = x1 * w1 + x2 * (w2 + h) + b\n h_plus_error = np.abs(y_predict - y).mean()\n if h_plus_error < current_error:\n w2 = w2 + h\n else:\n y_predict = x1 * w1 + x2 * (w2 - h) + b\n h_minus_error = np.abs(y_predict - y).mean()\n if h_minus_error < current_error:\n w2 = w2 - h\n\n y_predict = x1 * w1 + x2 * w2 + (b + h)\n h_plus_error = np.abs(y_predict - y).mean()\n if h_plus_error < current_error:\n b = b + h\n else:\n y_predict = x1 * w1 + x2 * w2 + (b - h)\n h_minus_error = np.abs(y_predict - y).mean()\n if h_minus_error < current_error:\n b = b - h\n\nprint(\"{0} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}\".format(epoch, w1, w2, b, current_error))109 w1 = 0.29811, w2 = 0.51574 b = 0.09606 error = 0.00472\n### Third Idea - Gradient Descentnum_epoch = 
100\nlearning_rate = 1.1\n\nw1 = np.random.uniform(low=-1.0, high=1.0)\nw2 = np.random.uniform(low=-1.0, high=1.0)\nb = np.random.uniform(low=-1.0, high=1.0)\n\nfor epoch in range(num_epoch):\n y_predict = x1 * w1 + x2 * w2 + b\n\n error = np.abs(y_predict - y).mean()\n if error < 0.005:\n break\n\n w1 = w1 - learning_rate * ((y_predict - y) * x1).mean()\n w2 = w2 - learning_rate * ((y_predict - y) * x2).mean()\n b = b - learning_rate * (y_predict - y).mean()\n\n if epoch % 10 == 0:\n print(\"{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}\".format(epoch, w1, w2, b, error))\n \nprint(\"----\" * 15)\nprint(\"{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}\".format(epoch, w1, w2, b, error))\nnum_epoch = 100\nlearning_rate = 1.1\n\nw1 = np.random.uniform(low=-1.0, high=1.0)\nw2 = np.random.uniform(low=-1.0, high=1.0)\nb = np.random.uniform(low=-1.0, high=1.0)\n\nfor epoch in range(num_epoch):\n y_predict = x1 * w1 + x2 * w2 + b\n\n error = np.abs(y_predict - y).mean()\n if error < 0.005:\n break\n\n w1 = w1 - learning_rate * ((y_predict - y) * x1).mean()\n w2 = w2 - learning_rate * ((y_predict - y) * x2).mean()\n b = b - learning_rate * (y_predict - y).mean()\n\n if epoch % 10 == 0:\n print(\"{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}\".format(epoch, w1, w2, b, error))\n\nprint(\"----\" * 15)\nprint(\"{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}\".format(epoch, w1, w2, b, error)) 0 w1 = -0.28433, w2 = 0.17431 b = 1.00120 error = 0.54307\n10 w1 = -0.06276, w2 = 0.24165 b = 0.44684 error = 0.12318\n20 w1 = 0.11247, w2 = 0.35496 b = 0.27956 error = 0.06483\n30 w1 = 0.20332, w2 = 0.42073 b = 0.19483 error = 0.03415\n40 w1 = 0.24989, w2 = 0.45708 b = 0.15014 error = 0.01801\n50 w1 = 0.27391, w2 = 0.47691 b = 0.12651 error = 0.00950\n60 w1 = 0.28636, w2 = 0.48763 b = 0.11401 error = 0.00502\n------------------------------------------------------------\n61 w1 = 0.28636, w2 = 0.48763 b = 0.11401 error = 0.00471\n"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/01_01-single-layer-neural-network-for-regression.ipynb"},"repo_name":{"kind":"string","value":"ikarus-999/DeepLearning01"},"chain_length":{"kind":"number","value":5,"string":"5"}}},{"rowIdx":4897,"cells":{"content":{"kind":"string","value":"# Short URL on this Colab: https://bit.ly/aieat-pycaret2# PyCaret 3.0 - Week 2\n## พัฒนา Model AI แบบ Low code ด้วย PyCaret\n\n1. PyCaret Classification\n2. PyCaret Time Series\n3. 
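The same gradient-descent update can be written in vectorized form. The sketch below is equivalent to the loop above (x1, x2 and y are the arrays generated at the top of this example) and is only meant to show the matrix formulation.

```python
import numpy as np

X = np.c_[x1, x2]                          # design matrix, shape (100, 2)
w = np.random.uniform(-1.0, 1.0, size=2)   # [w1, w2]
b = np.random.uniform(-1.0, 1.0)
learning_rate = 1.1

for epoch in range(100):
    y_predict = X @ w + b
    error = np.abs(y_predict - y).mean()
    if error < 0.005:
        break
    residual = y_predict - y
    w -= learning_rate * (X * residual[:, None]).mean(axis=0)
    b -= learning_rate * residual.mean()

print(epoch, w, b, error)
```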
PyCaret Clustering# ติดตั้ง PyCaret\nติดตั้ง Pycaret ผ่าน pip (Package Installer for Python) ด้วยวิธีนี้!pip install --pre catboost pycaretLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\nCollecting catboost\n Downloading catboost-1.2-cp310-cp310-manylinux2014_x86_64.whl (98.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m98.6/98.6 MB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting pycaret\n Downloading pycaret-3.0.2-py3-none-any.whl (483 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m483.6/483.6 kB\u001b[0m \u001b[31m41.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hRequirement already satisfied: graphviz in /usr/local/lib/python3.10/dist-packages (from catboost) (0.20.1)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from catboost) (3.7.1)\nRequirement already satisfied: numpy>=1.16.0 in /usr/local/lib/python3.10/dist-packages (from catboost) (1.22.4)\nRequirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.10/dist-packages (from catboost) (1.5.3)\nRequirement already satisfied: scipy in [...]**เมื่อลงเสร็จแล้ว ต้องกดปุ่ม Restart Runtime เพื่อให้มัน Update Matplotlib เป็น Version ล่าสุด** และรัน Cell ข้างล่างเพื่อรัน Libary ที่ต้องใช้# Importing necessary libraries\n\nimport pandas as pd\nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nimport seaborn as sns\nfrom pycaret.datasets import get_data \n# ปรับ dpi ของกราฟให้ละเอียดมากพอ ไม่ให้รูปใหญ่เกิน Notebook Colab\nmpl.rcParams['figure.dpi'] = 300# Time-Series\nทำนายข้อมูลในอนาคตด้วยการทำนายราคา Bitcoin กันโดยดึงข้อมูลในรูปแบบ CSV มาจากเว็บ https://www.blockchain.com/charts/market-price\n\n## Time-Series vs Regression?\n* Time-series **ไม่สามารถใช้วิธีการแบ่งข้อมูลแบบเดียวกับงาน Regression** เพราะ Regression/Classification ใช้วิธีแบ่งข้อมูล Stratified random sampling ซึ่งเนื่องจากข้อมูลในอนาคตที่เราต้องการทำนายจะ Leak เราต้องแบ่งเป็น Rolling / Sliding Windows บน Dataset เพื่อให้ Fair ในการวัดผล\n* **Model Algorithm?** Regression กับ Time-Series Data มีความใกล้เคียงกัน แต่อยู่บนสันนินาฐที่ต่างกัน อาทิเช่นเรื่องความเป็น Seasonal ที่ Time-Series Data Prediction เอามาใช้ประโยชน์ได้ และมี Model ที่สร้างมาเพื่องานนี้แตกต่างกัน! wget -O bitcoin.csv https://raw.githubusercontent.com/iapp-technology/training_datasets/main/market-price-all.csv--2022-08-14 07:49:46-- https://raw.githubusercontent.com/iapp-technology/training_datasets/main/market-price-all.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 45473 (44K) [text/plain]\nSaving to: ‘bitcoin.csv’\n\n\rbitcoin.csv 0%[ ] 0 --.-KB/s \rbitcoin.csv 100%[===================>] 44.41K --.-KB/s in 0.001s \n\n2022-08-14 07:49:46 (57.5 MB/s) - ‘bitcoin.csv’ saved [45473/45473]\n\nเปิด Tab File ด้านขวา จะเจอไฟล์ชื่อ bitcoin.csv ทำการ Double Click เพื่อดูเนื้อหาได้เลยimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Load the dataset\ndf = pd.read_excel(\"https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx\")\n\n# View the first few rows of the dataset\nprint(df.head())\n\n# Get summary statistics of the numerical columns\nprint(df.describe())\n\n# Check the data types of the columns\nprint(df.info())\n\n# Check for missing values\nprint(df.isnull().sum())\n\n# Perform data visualization\n\n# Histogram of a numerical column\nplt.figure(figsize=(10, 6))\nsns.histplot(df['area'], bins=30)\nplt.xlabel('Area')\nplt.ylabel('Count')\nplt.title('Distribution of Area')\nplt.show()\n\n# Scatter plot of two numerical columns\nplt.figure(figsize=(10, 6))\nsns.scatterplot(x='cane_type', y='area', data=df)\nplt.xlabel('Cane Type')\nplt.ylabel('Area')\nplt.title('Area vs. Cane Type')\nplt.show()\n\n# Box plot of a numerical column by a categorical column\nplt.figure(figsize=(10, 6))\nsns.boxplot(x='cane_type', y='area', data=df)\nplt.xlabel('Cane Type')\nplt.ylabel('Area')\nplt.title('Area by Cane Type')\nplt.show()\n\n# Correlation heatmap of numerical columns\nplt.figure(figsize=(10, 8))\nsns.heatmap(df.corr(), annot=True, cmap='coolwarm')\nplt.title('Correlation Heatmap')\nplt.show()\n\ndf.info()\nimport pandas as pd\nimport numpy as np\n\n# Load the dataset\ndf = pd.read_excel(\"https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx\")\n\n# Check for missing values\nprint(\"Missing Values:\")\nprint(df.isnull().sum())\n\n# Drop rows with missing values\ndf = df.dropna()\n\n# Handling outliers\ndef handle_outliers(data, column):\n q1 = np.percentile(data[column], 25)\n q3 = np.percentile(data[column], 75)\n iqr = q3 - q1\n lower_bound = q1 - (1.5 * iqr)\n upper_bound = q3 + (1.5 * iqr)\n data = data[(data[column] >= lower_bound) & (data[column] <= upper_bound)]\n return data\n\n# Apply outlier handling to the 'area' column\ndf = handle_outliers(df, 'area')\n\n# Reset the index after cleaning\ndf = df.reset_index(drop=True)\n\n# Verify the cleaned dataset\nprint(\"Cleaned Dataset:\")\nprint(df.head())\n\n# Drop the unwanted columns\ncolumns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT']\ndf = df.drop(columns_to_drop, axis=1)\n# dataframe จัดกลุ่มรวมยอด ตามวันที่\ndf = df.groupby('date').sum()\ndf.info\ndf\ndf.plot()\ndf.info()\nDatetimeIndex: 544 entries, 2018-10-03 to 2023-01-31\nData columns (total 1 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 area 544 non-null float64\ndtypes: float64(1)\nmemory usage: 8.5 KB\n## Time-Series Prediction\n\nTime-Series Prediction ของ PyCaret ไม่ได้ใช้บนพื้นฐานของ Scikit Learn ตามปกติที่เราทำกัน ตัวนี้สร้างบนพื้นฐานของ Facebook Phophet Engine 
https://facebook.github.io/prophet/\n\n![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJoAAAA0CAYAAABy6SGJAAABQ2lDQ1BJQ0MgUHJvZmlsZQAAKJFjYGASSSwoyGFhYGDIzSspCnJ3UoiIjFJgf8bAwcDHwMnAxWCZmFxc4BgQ4ANUwgCjUcG3awyMIPqyLsgsXtbzp+VX7HYMCuT7sftN3AJM9SiAKyW1OBlI/wHi1OSCohIGBsYUIFu5vKQAxO4AskWKgI4CsueA2OkQ9gYQOwnCPgJWExLkDGTfALIFkjMSgWYwvgCydZKQxNOR2FB7QYDbx13BLdTHR8HDhYBryQAlqRUlINo5v6CyKDM9o0TBERhKqQqeecl6OgpGBkZGDAygMIeo/nwDHJaMYhwIsUKgH608GRiYchFiCQEMDDs+gLyKEFPVYWDgOc7AcCC2ILEoEe4Axm8sxWnGRhA293YGBtZp//9/DmdgYNdkYPh7/f//39v///+7jIGB+RZQ7zcAm+9e/8SXSUYAAABWZVhJZk1NACoAAAAIAAGHaQAEAAAAAQAAABoAAAAAAAOShgAHAAAAEgAAAESgAgAEAAAAAQAAAJqgAwAEAAAAAQAAADQAAAAAQVNDSUkAAABTY3JlZW5zaG90qfldpQAAAdVpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NTI8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+MTU0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CuB5yzQAAATtSURBVHgB7Vo7axVBFJ4EQyJGRBEVQS2MoCSFJCoYCxECFlYqmh8gUX+AhZWFrYJt0MZOYpE/oIhNFNGIoqIRIUhMEw2KIj6C6H4XzmV277x27uwyzj0DN7s7c+Y8vvPNmdklXfuOX/kruDECFSPQXbF+Vs8INBBgojERakGAiVYLzGyEicYcqAUBJlotMLMRJhpzoBYEmGi1wMxGmGjMgVoQYKLVAjMbYaIxB2pBgIlWC8xsZBVDkEfg8IEBcebUQdHd3SVuTj8Sd2fm8gL85IUAE60A29o1vWLPzs2N3nX9fYVRfvRFgIlWQG725YKYvDUjVvf1iCcvFgqj/OiLQBf/m5AvdDyvDAIdXdGOHRkUA9s3ik+fv4s72VlsaflbGexYtgQCHUu0k0f3iovnxppQbdrQL67dvN985puwCHTs543BXVtySI4d2p175oewCHQs0b58/ZFD8t37j7lnfgiLQLJEGxnaJvDTtXsP34rnbxYbw89eLza+melkub99BJzfOh9PX9BawyeBp68WxPWpB1oZGpi8PG4kQBldpJOuZ8dHxfBgK8Gg8/ylKRJrXvt6e8SOrevF75U/Yv7DcrMfN1XEeyPDR4UR/J7Ifmg6XxuD2R9Zlvp0V509LEDkoUzbf+JqGfEW2SAVDY4DKCTHVEVarCs6fHQRcPBBZf/GbfUC+PlrRczNL7WQTOFWrsvHx5yCDnwI/tY5cXo0W5Wt1cMHW6w6l5WkWp2oDGiotHTv44NtTsh4bbZU41XGprLn2+dFNGxDcoByOcdqx08e1zmnKu2yLsyz6YK83FQ65XGf+1Dx+tg2zQHGqiOBaQ7GMK+4gOUjTRUYBtk6ce6QiYVV7tuKukayM5euFUnpChDmgcC+rXjOMvnoayO1eV4VTQUCtihKHu7baaSnrI4iAVTzm+QsLA6VrKkPC6usn6oXFZMNlzGTD/Lid9FVpUwQoiFYemtq19niVmjSh8RRQzWzNXl7sMmaxn0rInAaGSr3tmfyw6QPJAt1Vjb54DrmRTRsFbRdqFapS2WBgyCnTBbV6nTV5Rowycl2qU93pXhpTtHPqnzU+fM/9nsRzVS9XCqLDFQxaTSGFan7LEEy8pVIIPeFug8Vr24r02Hg4r9OZ7vHFxfbZWS8iKYyQMTQBa6ao+uDDpe3qTLnQiRTTmgZEqv89IkX/qqqX/PcqDJk6HPFyaCitiEvoqFqzUoHfl9yyW+JMthECpte+DCRQQU5VQJlFNt5Ew4Vr+xPp917EQ0JtpGgLJAgCrY/qjouH0LhA35ydaL5sI8xEFjWi/4GcbIx11ZFvK62bXJyvCrZ0HlS2XDp8yKai2IfGRCG3soAIEhiq1TyFitXRZ19AG/TqZsbWz8wIrxUviHWWN48g3ywVQXp0wdgUG2o4RBuW7EkCznToR1y0C0Tk+bytXoEnCtaqBJsexuiakNvkfi04GIbMiARncWIoOjXHcJN8LrYNM2nMVu8kMPWPJz5iWaTl2UbEwx/bLrkqWVk5Xmu987/JuSqkOUYARUCUW2dKge5Lw0EmGhp5DH6KJho0acoDQeZaGnkMfoomGjRpygNB5loaeQx+iiYaNGnKA0HmWhp5DH6KJho0acoDQeZaGnkMfoomGjRpygNB5loaeQx+iiYaNGnKA0HmWhp5DH6KJho0acoDQf/AVjS2x34TqZxAAAAAElFTkSuQmCC)\n\nเป็น Library การคาดการณ์ข้อมูลอนุกรมเวลาตามแบบจำลองการเติมซึ่งแนวโน้มที่ไม่เป็นเชิงเส้นเหมาะสมกับฤดูกาลประจำปี รายสัปดาห์ และรายวัน บวกกับเอฟเฟกต์วันหยุด ทำงานได้ดีที่สุดกับอนุกรมเวลาที่มีผลตามฤดูกาลที่แข็งแกร่งและข้อมูลทางประวัติศาสตร์หลายฤดูกาล \n\nPhophet มีความแข็งแกร่งต่อข้อมูลที่ขาดหายไปและการเปลี่ยนแปลงในแนวโน้ม และโดยทั่วไปแล้วจะจัดการกับค่าผิดปกติได้ดี\n\n```\nfrom pycaret.time_series import *\nexp_name = setup(data = bitcoin_df, target=\"market-price\", fh = 12)\n```ค่า Parameter 
ในการ Setup มีดังนี้\n\n1. *data = dataframe ที่เราต้่องการที่จะนำไป Train (จำเป็นต้องใส่)\n1. *target = Column เป้าหมาย (y) ที่เราต้่องการให้ Model เรียนรู้และสามารถ Predict ค่า target ได้อย่างแม่นยำ (จำเป็นต้องใส่)\n1. session_id = เลขประจำ session เป็นเลขอะไรก็ได้ เราสามารถใช้อ้างอิงได้ภายหลัง\n1. Transform_target = หากข้อมูลเป้าหมาย (y) ไม่เป็น Normal Distribution อาทิเช่นเอนเอียงไปด้านนึง เราควรปรับค่านี้ให้เป็น True ซึ่งระบบจะทำการแปลงข้่อมูลเป้าหมาย (y) ให้เป็น Normal Distribution ให้เรา\n1. fh = จำนวนข้อมูลที่เราต้องการที่จะให้ Model ของเรา Forecast\n1. seasonal_period: ปรับค่า Seasonal_Period ที่โมเดลใช้เป็นสมมุติฐาน\n * B, C = 5\n * D = 7\n * W = 52\n * M, BM, CBM, MS, BMS, CBMS = 12\n * SM, SMS = 24\n * Q, BQ, QS, BQS = 4\n * A, Y, BA, BY, AS, YS, BAS, BYS = 1\n * H = 24\n * T, min = 60\n * S = 60\n1. fold_strategy: การแบ่งข้อมูลในการแยก Training set / Validation set\n * 'expanding'\n * 'rolling' (same as/aliased to 'expanding')\n * 'sliding'\n\n1. fold: int, default = 3 จำนวน Fold ที่ใช้ในการ Cross Validation\n### Expanding/Rolling Windows\n![](https://i.stack.imgur.com/Zs2Xp.png)### Sliding Windows\n![](https://i.stack.imgur.com/Xd62a.png)import pandas as pd\nfrom pycaret.time_series import *\n\n# Load the dataset\ndf = pd.read_excel(\"https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx\")\n\n# Drop the unwanted columns\ncolumns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT']\ndf = df.drop(columns_to_drop, axis=1)\ndf = df.groupby('date').sum()\n# Drop duplicates from the \"date\" column\ndf = df.drop_duplicates(subset='date')\n\n# Set the frequency of the index to daily ('D')\ndf['date'] = pd.to_datetime(df['date'])\ndf = df.set_index('date').asfreq('D')\n\n# Interpolate missing values\ndf = df.interpolate()\n\n\n# Perform time series setup\nexp_name = setup(data=df, target='area', fh=12, fold=10, fold_strategy='sliding')\nimport pandas as pd\nfrom pycaret.time_series import *\n\n# Load the dataset\ndf = pd.read_excel(\"https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx\")\n\n# Drop the unwanted columns\ncolumns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT']\ndf = df.drop(columns_to_drop, axis=1)\ndf = df.groupby('date').sum()\n\n# Check for missing values\nprint(\"Missing Values:\")\nprint(df.isnull().sum())\n\n# Drop rows with missing values\ndf = df.dropna()\n\n# Handling outliers\ndef handle_outliers(data, column):\n q1 = np.percentile(data[column], 25)\n q3 = np.percentile(data[column], 75)\n iqr = q3 - q1\n lower_bound = q1 - (1.5 * iqr)\n upper_bound = q3 + (1.5 * iqr)\n data = data[(data[column] >= lower_bound) & (data[column] <= upper_bound)]\n return data\n\n# Apply outlier handling to the 'area' column\ndf = handle_outliers(df, 'area')\n\n# Reset the index after cleaning\ndf = df.reset_index(drop=True)\n\n# Verify the cleaned dataset\nprint(\"Cleaned Dataset:\")\nprint(df.head())\n\n# Perform time series setup\nexp_name = setup(data=df, target='area', fh=12, fold=10, fold_strategy='sliding')\n\ndf.head()## Explore Data\nเราสามารถใช้ Plot เพื่อดูข้อมูลได้เลยง่ายๆ\n\ndf.plot()## Compare Models\nเราสามารถใช้่คำสั่ง ``compare_models()`` เพื่อหา Model ที่ดีที่สุดในการทำ Forecasting\n* Model = ชื่อ Model\n* MAE = Mean Absolute Error (https://en.wikipedia.org/wiki/Mean_absolute_error) ยิ่งน้อยยิ่งดี\n* MSE = Mean Square Error (https://en.wikipedia.org/wiki/Mean_squared_error) ยิ่งน้อยยิ่งดี\n* RMSE = Root Mean Square Error (https://en.wikipedia.org/wiki/Root-mean-square_deviation) ยิ่งน้อยยิ่งดี\n* R2 = Coefficient of 
determination (https://en.wikipedia.org/wiki/Coefficient_of_determination) ยิ่งเข้าใกล้ 1 ยิ่งดี\n* RMSLE - Root Mean Squared Logaritmic Error (RMSLE) (https://hrngok.github.io/posts/metrics/#:~:text=Root%20Mean%20Squared%20Logaritmic%20Error%20(RMSLE)&text=It%20is%20the%20Root%20Mean,possible%200%20(zero)%20values.) ยิ่งน้อยยิ่งดี\n* MAPE - Mean absolute percentage error\n (https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) ยิ่งน้อยยิ้งดี\n* TT - Time Taken (Sec) เวลาในการใช้best = compare_models(fold=1, round=1)## Create Model\n\nเราสามารถใช้่คำสั่ง create_model(\"{ชื่อ_Model}\") เพื่อสร้าง model ตามที่เราต้องการ\n\n* 'naive' - Naive Forecaster\n* 'grand_means' - Grand Means Forecaster\n* 'snaive' - Seasonal Naive Forecaster (disabled when seasonal_period = 1)\n* 'polytrend' - Polynomial Trend Forecaster\n* 'arima' - ARIMA family of models (ARIMA, SARIMA, SARIMAX)\n* 'auto_arima' - Auto ARIMA\n* 'exp_smooth' - Exponential Smoothing\n* 'croston' - Croston Forecaster\n* 'ets' - ETS\n* 'theta' - Theta Forecaster\n* 'tbats' - TBATS\n* 'bats' - BATS\n* 'prophet' - Prophet Forecaster\n* 'lr_cds_dt' - Linear w/ Cond. Deseasonalize & Detrending\n* 'en_cds_dt' - Elastic Net w/ Cond. Deseasonalize & Detrending\n* 'ridge_cds_dt' - Ridge w/ Cond. Deseasonalize & Detrending\n* 'lasso_cds_dt' - Lasso w/ Cond. Deseasonalize & Detrending\n* 'lar_cds_dt' - Least Angular Regressor w/ Cond. Deseasonalize & Detrending\n* 'llar_cds_dt' - Lasso Least Angular Regressor w/ Cond. Deseasonalize & Detrending\n* 'br_cds_dt' - Bayesian Ridge w/ Cond. Deseasonalize & Deseasonalize & Detrending\n* 'huber_cds_dt' - Huber w/ Cond. Deseasonalize & Detrending\n* 'par_cds_dt' - Passive Aggressive w/ Cond. Deseasonalize & Detrending\n* 'omp_cds_dt' - Orthogonal Matching Pursuit w/ Cond. Deseasonalize & Detrending\n* 'knn_cds_dt' - K Neighbors w/ Cond. Deseasonalize & Detrending\n* 'dt_cds_dt' - Decision Tree w/ Cond. Deseasonalize & Detrending\n* 'rf_cds_dt' - Random Forest w/ Cond. Deseasonalize & Detrending\n* 'et_cds_dt' - Extra Trees w/ Cond. Deseasonalize & Detrending\n* 'gbr_cds_dt' - Gradient Boosting w/ Cond. Deseasonalize & Detrending\n* 'ada_cds_dt' - AdaBoost w/ Cond. Deseasonalize & Detrending\n* 'lightgbm_cds_dt' - Light Gradient Boosting w/ Cond. Deseasonalize & Detrending\n* 'catboost_cds_dt' - CatBoost w/ Cond. Deseasonalize & Detrending\nada_cds_dt_model = create_model(\"ada_cds_dt\")## Predictionpredictions = predict_model(ada_cds_dt_model)\npredictions\ndf[-30:]\npredictions.insert(1, \"area\", df[-30:], True)\n\npredictions\npredictions.plot()## Plot Model\n\n``plot_model({model}, plot={plot_name})``\n\ndefault is changed to 'forecast'. 
List of available plots (ID - Name):\n* 'ts' - Time Series Plot\n* 'train_test_split' - Train Test Split\n* 'cv' - Cross Validation\n* 'acf' - Auto Correlation (ACF)\n* 'pacf' - Partial Auto Correlation (PACF)\n* 'decomp' - Classical Decomposition\n* 'decomp_stl' - STL Decomposition\n* 'diagnostics' - Diagnostics Plot\n* 'diff' - Difference Plot\n* 'periodogram' - Frequency Components (Periodogram)\n* 'fft' - Frequency Components (FFT)\n* 'ccf' - Cross Correlation (CCF)\n* 'forecast' - \"Out-of-Sample\" Forecast Plot\n* 'insample' - \"In-Sample\" Forecast Plot\n* 'residuals' - Residuals Plotplot_model(ada_cds_dt_model, \"ts\")\nplot_model(ada_cds_dt_model, \"forecast\")\nplot_model(arima_model, \"train_test_split\")\ntuned_arima_model = tune_model(arima_model)### Finalize, Save Model and Load Model\n\nเมื่อเราได้ Model ที่พร้อมแล้ว เราสามารถทำ ``finalize_model({Model})`` **เพื่อทำการ Train Model บน Dataset ทั้่งหมด (ALL) ใหม่** เพื่อให้โมเดลได้เรียนรู้บน Data ให้ได้มากที่สุดบนรูปแบบ Estimator (Model) และ Hyperparamters ที่เราได้เลือกและได้กำหนดค่าไว้ในตัวแปร {Model}\n\n**Save model**\n\n``save_model({Final_Model}, \"ชื่อไฟล์\")``\nและ\n\n**Load Model**\n\n``{Model} = load_model(\"ชื่อไฟล์\")``\nเหมือนตัวอื่นๆfinal_best = finalize_model(best)\nsave_model(final_best, \"snaive_bitcoin_best\")\nloaded_snaive_bitcoin = load_model(\"snaive_bitcoin_best\")\nloaded_snaive_bitcoin## Exercise\n\nเรามาลอง Predict ข้อมูลที่มี Pattern ชัดเจนมากกว่า Bitcoin กันเถอะ ข้อมูลคุณภาพอากาศ\n\n![](https://www.researchgate.net/profile/Irena-Markovska-2/publication/340899809/figure/fig2/AS:883882824065027@1587745495992/Dependences-between-criterion-COGT-and-criteria-PT08S1CO-C6H6GT-PT08S2NMHC.ppm)\n\nปริมาณสารพิษในอากาศที่จับได้ ประกอบไปด้วย\n* True hourly averaged concentration CO in mg/m3 - CO(GT)\n* PT08.S1 (tin oxide) hourly averaged sensor response - PT08.S1(CO)\n* True hourly averaged Benzene concentration in microg/m3 - C6H6(GT)\n* PT08.S2 (titania) hourly averaged sensor response - PT08.S2(NMHC)\n* True hourly averaged NOx concentration in ppb - NOx(GT)\n* PT08.S3 (tungsten oxide) hourly averaged sensor response - PT08.S3(NOx)* True hourly averaged NO2 concentration in microg/m3 - NO2(GT);8) \n* PT08.S4 (tungsten oxide) hourly averaged sensor response - PT08.S4(NO2)\n* PT08.S5 (indium oxide) hourly averaged sensor response - PT08.S5(O3);\n* Temperature in °C - T\n* Relative Humidity (%) - RH\n* Absolute Humidity - AH \n\n(PDF) Application of the InterCriteria Analysis Over Air Quality Data. 
Available from: https://www.researchgate.net/publication/318029588_Application_of_the_InterCriteria_Analysis_Over_Air_Quality_Data [accessed Aug 14 2022].\nfrom pycaret.datasets import get_data\nairquality = get_data('airquality')\nairquality.info()\n# รวม Date+Time เป็น datetime\nairquality[\"datetime\"] = pd.to_datetime(airquality.Date.astype(str) + ' ' + airquality.Time.astype(str))\nairquality = airquality.drop([\"Date\",\"Time\"], axis=1)\nairquality\n# dataframe จัดกลุ่มรวมยอด ตามวันที่\nairquality=airquality.groupby('datetime',as_index=True).sum()\n#แปลง index ที่เป็นอยู่ date ให้เป็น date_time\nairquality.index=pd.to_datetime(airquality.index)\nairquality\nfrom pycaret.time_series import *\nexp_name = setup(data = airquality, target=\"CO(GT)\", fh = 12)\n# Start your work here....# Clusteringคือการจัดกลุ่มข้อมูลโดยอัตโนมัติ เป็นรูปแบบ Unsupervised Learning โดยไม่ต้องมีค่า y (target class) มักจะใช้ในงานจัดกลุ่มลูกค้า (Customers Segmentation) หรือจัดกลุ่มเนื้อหาเอกสารหรือบทความต่างๆ (Document Grouping) เป็นต้นimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nfrom pycaret.clustering import *\nfrom sklearn.datasets import make_blobs \nmpl.rcParams['figure.dpi'] = 100## Synthetic Dataset\n\nรอบนี้เราลองสร้าง Dataset ปลอมๆ ขึ้นมาเอง เพื่อให้รู้ว่าการสร้าง Dataset ไม่จำเป็นต้องมาจากที่มีอยู่แล้วก็ได้ # Generating dataset\n\ncols = ['column1', 'column2', 'column3',\n 'column4', 'column5']\narr = make_blobs(n_samples = 1000, n_features = 5, random_state = 20,\n centers = 3, cluster_std = 1)\ndata = pd.DataFrame(data = arr[0], columns = cols)\ndata.head()ในรอบนี้ เราสร้างข้อมูลจำนวน 1000 แถว มี 5 Features (Columns) โดยมีกระจุกตัวกันอยู่ 3 กลุ่ม (Clusters) การใช้ชุดข้อมูลสังเคราะห์เพื่อทดสอบโมเดลการทำคลัสเตอร์ของเรามีประโยชน์หลายประการ ข้อได้เปรียบหลักคือเราทราบจำนวนคลัสเตอร์จริงแล้ว ดังนั้นเราจึงสามารถประเมินประสิทธิภาพของโมเดลได้อย่างง่ายดาย โดยทั่วไป ข้อมูลในโลกแห่งความเป็นจริงจะซับซ้อนกว่าเนื่องจากไม่มีคลัสเตอร์ที่แยกจากกันอย่างชัดเจนเสมอไป## Explore Data\nเราลองมาดู Data เรากันว่าหน้าตาเป็นอย่างไร### Histogram\nเนื่องจากมี 3 Clusters ที่กระจุกตัวแตกต่างกัน ทำให้เราเห็นข้อมูลในแต่ละ Column มีจุด Peak อยู่ 2-3 จุด หรือเรียกว่า (bimodal หรือ multimodal) # Plotting histogram\n\ndata.hist(bins = 30, figsize = (10,7), grid = False) \nplt.show()### Color-encoded Matrix\n\nดูความสัมพันธ์ระหว่าง Feature X ด้วย ``corr()`` และสร้างเป็น Heatmap มาให้ดู\n* 1 ไปด้วยกันเสมอ\n* -1 สวนทางกันเสมอ# Plotting color-encoded matrix\n\nplt.figure(figsize=(8, 6)) \nsns.heatmap(data.corr().round(decimals=2), annot=True)\nplt.show()สังเกตุได้ว่า Col2, Col3 จะมี Correlation ใกล้เคียงกันสูง (แต่อย่างไรก็ตามก็ขึ้นกับการ Random ณ ขณะนั้่นด้่วย)### การดู Scatter plot เพื่อดู ความสัมพันธ์ระหว่าง Feature Xsns.pairplot(data)\nplt.show()## Clustering Prediction\n\nการ Setup ของ Clustering จะมีความแตกต่างจากตัวอื่นๆ ดังนี้\n\n1. *data = dataframe ที่เราต้่องการที่จะนำไป Train (จำเป็นต้องใส่)\n1. session_id = เลขประจำ session เป็นเลขอะไรก็ได้ เราสามารถใช้อ้างอิงได้ภายหลัง\n1. pca: bool, default = False = เปิดการใช้ PCA เพื่อลดจำนวน Features ลงหรือไม่\n1. pca_method: str, default = 'linear' วิธีการทำ PCA, Possible values are:\n - 'linear': Uses Singular Value Decomposition.\n - 'kernel': Dimensionality reduction through the use of RBF kernel.\n - 'incremental': Similar to 'linear', but more efficient for large datasets.\n1. pca_components: int, float, str or None, default = None จำนวนของ pca ที่ต้องการเก็บไว้. 
This parameter is ignored when `pca=False`.\n - If None: All components are kept.\n - If int: Absolute number of components.\n - If \"mle\": Minka’s MLE is used to guess the dimension (ony for pca_method='linear').\n1. Transformation = หากข้อมูลไม่เป็น Normal Distribution อาทิเช่นเอนเอียงไปด้านนึง เราควรปรับค่านี้ให้เป็น True ซึ่งระบบจะทำการแปลงข้่อมูลให้เป็น Normal Distribution ให้เรา\n1. Normalize = ทำการแปลงค่า numeric_features ทั้งหมดให้อยู่ระหว่าง -e..e โดยใช้หลักการของ z-score = (x - mean) / standard deviation\nfrom pycaret.clustering import *\n# PyCaret environment setup.Setting different parameters in setup() function\n# to prepare model training and deployment data.\n\ncluster = setup(data, session_id = 7652)INFO:logs:PyCaret ClusteringExperiment\nINFO:logs:Logging name: cluster-default-name\nINFO:logs:ML Usecase: MLUsecase.CLUSTERING\nINFO:logs:version 3.0.0.rc3\nINFO:logs:Initializing setup()\nINFO:logs:self.USI: f6c1\nINFO:logs:self.variable_keys: {'exp_id', '_gpu_n_jobs_param', 'display_container', 'memory', 'X', '_all_metrics', 'variable_keys', 'exp_name_log', 'n_jobs_param', 'USI', 'logging_param', '_available_plots', '_ml_usecase', 'log_plots_param', '_all_models_internal', 'master_model_container', '_all_models', 'idx', 'seed', 'data', 'pipeline', 'gpu_param', 'html_param'}\nINFO:logs:Checking environment\nINFO:logs:python_version: 3.7.13\nINFO:logs:python_build: ('default', 'Apr 24 2022 01:04:09')\nINFO:logs:machine: x86_64\nINFO:logs:platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic\nINFO:logs:Memory: svmem(total=13617745920, available=11611111424, percent=14.7, used=1977847808, free=9328275456, active=2347229184, inactive=1563693056, buffers=221483008, cached=2090139648, shared=13148[...]## Compare Model?\nCompare Model ไม่สามารถทำได้ เพราะเนื่องจากเราไม่มีเฉลย เพราะฉะนั้นเราต้องไล่ Create_model ไปแล้วดูผลของการจัดกลุ่มด้วยตาของมนุษย์แทน## Create Model\nสร้าง Model ด้วยคำสั่ง ``create_model({ชื่อ Model}, num_clusters={จำนวน Cluster})`` หากเราไม่กำหนด ``num_clusters`` จะมีค่า Default = 4\n\n* 'kmeans' - K-Means Clustering\n* 'ap' - Affinity Propagation\n* 'meanshift' - Mean shift Clustering\n* 'sc' - Spectral Clustering\n* 'hclust' - Agglomerative Clustering\n* 'dbscan' - Density-Based Spatial Clustering\n* 'optics' - OPTICS Clustering\n* 'birch' - Birch Clustering\n* 'kmodes' - K-Modes Clusteringkmeans_model = create_model(\"kmeans\")INFO:logs:Initializing create_model()\nINFO:logs:create_model(self=, estimator=kmeans, num_clusters=4, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={})\nINFO:logs:Checking exceptions\n### ค่า Error ของการทำ Clustering\n\nค่า Error จากการทำ Clustering นั้น ไม่ได้เกิดจาก y label (เฉลย) ซึ่งข้อมูลนี้มันไม่มี แต่มันเกิดจากพอเค้าลอง Assign Cluster ให้แต่ละ Dataset แล้วดูว่าคุณลักษณะของ Cluster นั้น ดีหรือแย่อย่างไร อาทิเช่น ถ้าทุกจุดเกาะกลุ่มกันแน่นจนเป็นจุดเดียวกัน = ดี , แต่ถ้าหากกระจายไปปนกับ cluster อื่นๆ = ไม่ดี โดยมีหลักในการดูดังนี้\n\n* Silhouette - มีค่าระหว่าง -1 ถึง 1 - ยิ่งเยอะยิ่งดี ถ้า 1 ทุกจุดใน Cluster มีคุณลักษณะตรงกับจุดอื่นๆใน Cluster - https://en.wikipedia.org/wiki/Silhouette_(clustering)\n* Calinski-Harabasz - สัดส่วนระยะห่าง (Distance Ratio) ของจุดระหว่างจุดใน Cluster กับ Cluster อื่นๆ หารด้วย จุดใน Cluster กับ Cluster ของตน - คะแนนยิ่งเยอะยิ่งดี \n* Davies-Bouldin - สัดส่วนระยะห่าง (Distance Ratio) ของ จุดใน Cluster กับ Cluster ของตน หารด้วย จุดระหว่างจุดใน Cluster กับ Cluster 
อื่นๆ - คะแนนยิ่งน้อยยิ่งดี\n* Homogeneity - มีค่าระหว่าง 0 ถึง 1 - ยิ่งเยอะยิ่งดี - ในกรณีมี Class y เราสามารถวัดได้ว่า มีจุดไหนที่ถูก Cluster ได้มากกว่า 2 จุดหรือไม่ ถ้าไม่มีเลย แสดงว่าแบ่ง Cluster ได้ดี ค่่า Homongenity = 1.0 \n* Rand Index - มีค่าระหว่าง 0 ถึง 1 - ยิ่งเยอะยิ่งดี - ในกรณีมี Class y ความแม่นยำของการเลือก Cluster ที่ถูกต้องให้แต่ละจุด (เหมือน Accuracy ใน Classification)\n* Completeness - มีค่าระหว่าง 0 ถึง 1 - ยิ่งเยอะยิ่งดี - ในกรณีมี Class y จำนวนจุดในข้อมูลที่เฉลยหากได้อยู่ Cluster เดียวกันหมดได้จะค่าเป็น 1 ดูว่าสามารถเก็บมาครบได้หรือไม่ (เหมือน Recall ใน Classification)## Plot Model\n\n``plot_model({ModelVar}, '{PlotName}')``\n\n* 'cluster' - Cluster PCA Plot (2d)\n* 'tsne' - Cluster t-SNE (3d)\n* 'elbow' - Elbow Plot\n* 'silhouette' - Silhouette Plot\n* 'distance' - Distance Plot\n* 'distribution' - Distribution Plot### Elbow Plot\nใช้หาจำนวน Cluster ที่ดีที่สุดบน Dataset# Plotting the model\n \nplot_model(kmeans_model, 'elbow')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=elbow, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=4, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\nเมื่อเรารู้แล้่วว่าจำนวน Cluster ที่ดีที่สุดคือ 3 เราก็ทำการ Train ใหม่ให้แม่นยำขึ้นkmeans_model = create_model('kmeans', num_clusters = 3)INFO:logs:Initializing create_model()\nINFO:logs:create_model(self=, estimator=kmeans, num_clusters=3, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={})\nINFO:logs:Checking exceptions\nสังเกตุได้ว่า Error Matrices ทุกตัวดีขึ้น อย่างมีนัยสำคัญ### PCA Plot\n\nทำการ Dimensionally Reduction โดยการใช้ Principal Component Analysis (PCA) จาก 5 มิติ -> 2 มิติ เพื่อให้ทำการแยกแยะง่ายขึ้น# Plotting PCA plot\n\nplot_model(kmeans_model, 'cluster')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=cluster, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\n### TSNE Plot# Plot แบบ 3 มิติด้่วย tsne แต่ใช้เวลารันนาน\nplot_model(kmeans_model, 'tsne')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=tsne, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\n### Silhouette Plotplot_model(kmeans_model, 'silhouette')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=silhouette, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, 
label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\n### Distance Plotplot_model(kmeans_model, 'distance')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=distance, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\n### Distribution Plotplot_model(kmeans_model, 'distribution')INFO:logs:Initializing plot_model()\nINFO:logs:plot_model(plot=distribution, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=, system=True)\nINFO:logs:Checking exceptions\n## Evaluate Modelevaluate_model(kmeans_model)INFO:logs:Initializing evaluate_model()\nINFO:logs:evaluate_model(self=, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), fold=None, fit_kwargs=None, plot_kwargs=None, feature_name=None, groups=None, use_train_data=False)\n## เรามาลอง Setup ใหม่แบบเปิดให้ PCA ให้เราได้กันเถอะ จะได้รู้ว่าดีขึ้นแค่ไหนfrom pycaret.clustering import *\n# PyCaret environment setup.Setting different parameters in setup() function\n# to prepare model training and deployment data.\n\ncluster = setup(data, session_id = 7653, pca=True, pca_method=\"linear\")\nnew_kmeans_model = create_model('kmeans', num_clusters = 3)INFO:logs:Initializing create_model()\nINFO:logs:create_model(self=, estimator=kmeans, num_clusters=3, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={})\nINFO:logs:Checking exceptions\nไม่ได้ช่วย แสดงว่าโจทย์นี้ยังไม่ซับซ้อนเท่าไร่ กระจายตัวกันสวยอยู่แล้วใน 5-Dimension อยู่แล้ว จะแยกใน 2-Dimension ก็ไม่ต่างกัน## Assign Model\nเหมือน Prediction แต่ว่าเนื่องจากเป็น Clustering ที่ไม่มี Y จะใช้คำว่า Prediction ไม่ได้ เลยใช้คำว่า Assign แทน , คืนค่า Cluster Assigment ของ Training Dataset มาให้ทั้งหมด\n``assign_model({ModelVar})`` assignments = assign_model(kmeans_model)\nassignments## Finalize, Save Model and Load Model\nเหมือนกับของทุกๆ Library ครับ## Clustering Exercise\n* ลอง Clustering กับ Data จริง ชื่อว่า ``jewellery`` ฐานข้อมูลสมาขิกลูกค้าที่เข้ามาซื้อเครื่องประดับของร้านค้าประกอบไปด้วย\n\n1. age - อายุ\n1. income - รายได้ (USD)\n1. SpendingScore - คะแนนการใช้จ่ายในร้านค้า ยิ่งเยอะยิ่งใช้จ่ายเยอะ\n1. 
Savings - เงินเก็บที่มีทั้งหมด (USD)\n\nลองหาดูว่าลูกค้าของร้านนี้มีกี่กลุ่ม และแต่ละกลุ่มมีจุดเด่นเรื่องอะไรบ้างfrom pycaret.datasets import get_data\njewellery = get_data('jewellery')\njewellery.info()\nfrom pycaret.clustering import *\n#exp = setup(data = jewellery) << Start working here."},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/time_serie_PyCaret_Ferti_CT.ipynb"},"repo_name":{"kind":"string","value":"Benjamnk/DSE-2"},"chain_length":{"kind":"number","value":30,"string":"30"}}},{"rowIdx":4898,"cells":{"content":{"kind":"string","value":"Here I am going to apply Principal component analysis on the given dataset using Scikit-learn and find out the dimensions(also known as components) with maximum variance(where the data is spread out).Features with little variance in the data are then projected into new lower dimension. Then the models are trained on transformed dataset to apply machine learning models.Then I have applied Random forest Regressor on old and the transformed datasets and compared them.\nIf you want to know the basic concept behind Principal Component Analysis check this out.\n(https://www.kaggle.com/nirajvermafcb/d/ludobenistant/hr-analytics/principal-component-analysis-explained)# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\ndf=pd.read_csv('../input/data.csv') #Replace it with your path where the data file is stored\ndf.head()\ndf.describe()\ndf.corr()Let us find if there is any relationship between temperature and apparent_temperaturex=df['temperature']\ny=df['apparent_temperature']\ncolors=('r','b')\nplt.xlabel('Temperature')\nplt.ylabel('Apparent_temperature')\nplt.scatter(x,y,c=colors)The temperture given here is in fahrenheit.We will convert it into Celsius using the formula \n**Celsius=(Fahrenheit-32)* (5/9)**Fahrenheit=df['temperature']Converting it into the list so we can apply lambda functionF=Fahrenheit.tolist()Applying Lambda functionC= map(lambda x: (float(5)/9)*(x-32),F)\nCelsius=(list(C))Converting list to seriestemperature_celsius=pd.Series(Celsius)Applying the series to temperature columndf['temperature']= temperature_celsius\ndf['temperature']\ndf.head()Thus we have converted the temperature column from fahrenheit to degree celsius.Similarly we are now converting apparent_temperature to degree celsius.at_fahrenheit=df['apparent_temperature']\nat_F=at_fahrenheit.tolist()\nat_C= map(lambda x: (float(5)/9)*(x-32),at_F)\nat_Celsius=(list(C))\nat_celsius=pd.Series(at_Celsius)\nat_celsius\napparent_temperature_celsius=pd.Series(at_Celsius)\nprint(apparent_temperature_celsius)\ndf['apparent_temperature']= temperature_celsius\ndf['apparent_temperature']\ndf.head()\nX = df.iloc[:,1:8] # all rows, all the features and no labels\ny = df.iloc[:, 0] # all rows, label only\n#X\n#y\ndf.corr()\ncorrelation = df.corr()\nplt.figure(figsize=(10,10))\nsns.heatmap(correlation, vmax=1, square=True,annot=True,cmap='viridis')\n\nplt.title('Correlation between different fearures')Standardising data# Scale the data to be between -1 and 1\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX=scaler.fit_transform(X)\nX\nfrom sklearn.decomposition import PCA\npca = PCA()\npca.fit_transform(X)\npca.get_covariance()\nexplained_variance=pca.explained_variance_ratio_\nexplained_variance\nwith plt.style.context('dark_background'):\n plt.figure(figsize=(6, 4))\n\n plt.bar(range(7), explained_variance, alpha=0.5, align='center',\n label='individual explained variance')\n plt.ylabel('Explained variance ratio')\n plt.xlabel('Principal components')\n plt.legend(loc='best')\n plt.tight_layout()**Thus we can see from the above plot that first two components constitute almost 55% of the variance.Third,fourth and fifth components has 42% of the data sprad.The last component has less than 5% of the variance.Hence we can drop the fifth component **pca=PCA(n_components=5)\nX_new=pca.fit_transform(X)\nX_new\npca.get_covariance()\nexplained_variance=pca.explained_variance_ratio_\nexplained_variance\nwith plt.style.context('dark_background'):\n plt.figure(figsize=(6, 4))\n\n plt.bar(range(5), explained_variance, alpha=0.5, align='center',\n label='individual explained variance')\n plt.ylabel('Explained variance ratio')\n plt.xlabel('Principal components')\n plt.legend(loc='best')\n plt.tight_layout()\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\nX_train.shape\n# Establish model\nfrom sklearn.ensemble import RandomForestRegressor\nmodel = RandomForestRegressor()\n# Try different numbers of n_estimators - this will take a minute or so\nestimators = np.arange(10, 200, 10)\nscores = []\nfor n in estimators:\n 
model.set_params(n_estimators=n)\n model.fit(X_train, y_train)\n scores.append(model.score(X_test, y_test))\nprint(scores) \nplt.title(\"Effect of n_estimators\")\nplt.xlabel(\"n_estimator\")\nplt.ylabel(\"score\")\nplt.plot(estimators, scores)\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size=0.2, random_state=1)\nX_train.shape\n# Establish model\nfrom sklearn.ensemble import RandomForestRegressor\nmodel = RandomForestRegressor()\n# Try different numbers of n_estimators - this will take a minute or so\nestimators = np.arange(10, 200, 10)\nscores = []\nfor n in estimators:\n model.set_params(n_estimators=n)\n model.fit(X_train, y_train)\n scores.append(model.score(X_test, y_test))\nprint(scores) \nplt.title(\"Effect of n_estimators\")\nplt.xlabel(\"n_estimator\")\nplt.ylabel(\"score\")\nplt.plot(estimators, scores)"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/notebook/principal-component-analysis-with-scikit-learn.ipynb"},"repo_name":{"kind":"string","value":"rohitsabu08/spider"},"chain_length":{"kind":"number","value":10,"string":"10"}}},{"rowIdx":4899,"cells":{"content":{"kind":"string","value":"##### quaternion to rotation matrix\n\n$ R_q = \\begin{bmatrix} 1-2cc-2dd & 2bc-2ad & 2bd+2ac \\\\ 2bc+2ad & 1-2bb-2dd & 2cd-2ab \\\\ 2bd-2ac & 2cd+2ab & 1-2bb-2cc \\end{bmatrix} $\n\n[AA](AxisAngleAlternative.md)\n##### quaternion to rotation matrix\n\n$\\color{red}{ R_q = \\overbrace{\\underbrace{\n\\begin{bmatrix}1&0&0\\\\0&1&0\\\\0&0&1\\end{bmatrix}\n}_{\\textstyle I}}^{\\textstyle 1} + \\overbrace{\\underbrace{\n\\begin{bmatrix}-\\ c^2-d^2&bc&bd\\\\bc&-\\ b^2-d^2&cd\\\\bd&cd&-\\ b^2-c^2\\end{bmatrix}\n}_{\\textstyle ([u]_{\\times})^2}}^{\\textstyle 2} + \\overbrace{\\underbrace{\n\\begin{bmatrix}0&-d&c\\\\d&0&-b\\\\-c&b&0\\end{bmatrix}\n}_{\\textstyle [u]_{\\times}}}^{\\textstyle 2a} }$\n\n$\\color{red}{ R_q = I + 2([u]_{\\times})^2 + 2a[u]_{\\times} }$\n\n12/12 or 9/15\n\n$ R_q = I + 2[u]_{\\times}([u]_{\\times} + aI) $\ndef matrix():\n # temp\n tb = b * 2; tc = c * 2; td = d * 2; # \n ab = tb * a; ac = tc * a; ad = td * a; # cross\n bb = tb * b; cc = tc * c; dd = td * d; # dot\n bc = tb * c; bd = tb * d; cd = tc * d; # dot\n # matrix elements\n xx = 1 - cc - dd; xy = bc - ad; xz = bd + ac; # x' row\n yx = bc + ad; yy = 1 - bb - dd; yz = cd - ab; # y' row\n zx = bd - ac; zy = cd + ab; zz = 1 - bb - cc; # z' row\n # 12 multiplications\n # 12 additions/subtractions##### quaternion to rotate vector\n\n$ \\vec{v'} = q\\vec{v}q^{-1} = (a+\\vec{u}) \\ \\vec{v} \\ (a-\\vec{u}) = \\vec{v} + 2\\vec{u}\\times(\\vec{u}\\times\\vec{v}) + 2a(\\vec{u}\\times\\vec{v}) $\ndef rotate():\n # temp\n tx = (c * z - d * y) * 2;\n ty = (d * x - b * z) * 2;\n tz = (b * y - c * x) * 2;\n # vector\n _x = x + (c * tz - d * ty) + a * tx; # x'\n _y = y + (d * tx - b * tz) + a * ty; # y'\n _z = z + (b * ty - c * tx) + a * tz; # z'\n # 18 multiplications\n # 12 additions/subtractions##### quaternion to rotate vector\n\n$ \\vec{v'} = q\\vec{v}q^{-1} = (a+\\vec{u}) \\ \\vec{v} \\ (a-\\vec{u}) = \\vec{v} + 2\\vec{u}\\times(\\vec{u}\\times\\vec{v}+a\\vec{v}) $\ndef rotate():\n # temp\n tx = (c * z - d * y) + a * x;\n ty = (d * x - b * z) + a * y;\n tz = (b * y - c * x) + a * z;\n # vector\n _x = x + (c * tz - d * ty) * 2; # x'\n _y = y + (d * tx - b * tz) * 2; # y'\n _z = z + (b * ty - c * tx) * 2; # z'\n # 18 multiplications\n # 12 
additions/subtractions"},"license":{"kind":"string","value":"no_license"},"path":{"kind":"string","value":"/math/Quaternion.ipynb"},"repo_name":{"kind":"string","value":"sultan/test"},"chain_length":{"kind":"number","value":3,"string":"3"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":48,"numItemsPerPage":100,"numTotalItems":5000,"offset":4800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjEyMDM4Niwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9zYW1wbGVfanVweXRlcl9zdHJ1Y3R1cmVkIiwiZXhwIjoxNzU2MTIzOTg2LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0._9xuhiiB2feeO6asaxDaaD0B9mNzutFUvi1tua9idPVoTwrMczjSu8yslGBut_KKclZjdDu1qmvk7CSy08VoBw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
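The quaternion notebook above gives two equivalent forms of the same rotation: the matrix $R_q = I + 2([u]_{\times})^2 + 2a[u]_{\times}$ and the direct product $\vec{v'} = \vec{v} + 2\vec{u}\times(\vec{u}\times\vec{v}+a\vec{v})$. As a quick cross-check, here is a minimal numpy sketch (an editor-added illustration, not part of the original notebook); the rotation angle, axis and test vector are arbitrary values chosen only for the comparison, and the quaternion (a, b, c, d) is assumed to be a unit quaternion, as in the formulas above.

```python
import numpy as np

# Unit quaternion q = a + b*i + c*j + d*k for a rotation of angle theta about a unit axis.
theta = 0.9                                  # arbitrary test angle (radians)
axis = np.array([1.0, 2.0, 2.0]) / 3.0       # arbitrary unit axis
a = np.cos(theta / 2.0)                      # scalar part
u = np.sin(theta / 2.0) * axis               # vector part u = (b, c, d)
b, c, d = u

# Matrix form: R_q = I + 2*([u]_x)^2 + 2*a*[u]_x
u_cross = np.array([[0.0,  -d,   c],
                    [  d, 0.0,  -b],
                    [ -c,   b, 0.0]])
R = np.eye(3) + 2.0 * (u_cross @ u_cross) + 2.0 * a * u_cross

# Direct form: v' = v + 2*u x (u x v + a*v)
v = np.array([0.3, -1.2, 2.5])               # arbitrary test vector
t = np.cross(u, v) + a * v
v_rotated = v + 2.0 * np.cross(u, t)

print(R @ v)                                 # same rotated vector both ways
print(v_rotated)
print(np.allclose(R @ v, v_rotated))         # expected: True
```

The operation counts quoted in the notebook suggest when each form pays off: building $R_q$ costs 12 multiplications once and applying it costs roughly 9 per vector, while the direct form costs 18 multiplications per vector, so the matrix form wins as soon as several vectors share one rotation.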
Dataset viewer column summary: content (string, lengths 73 to 1.12M), license (string, 3 classes), path (string, lengths 9 to 197), repo_name (string, lengths 7 to 106), chain_length (int64, 1 to 144).
<jupyter_start><jupyter_text><jupyter_code>from google.colab import drive drive.mount('/content/drive') import os os.chdir(r'/content/drive/My Drive/RGB1/') # os.chdir(r'/content/drive/My Drive/imgs/') # os.chdir(r'/content/drive/My Drive/data/imgs/') print(os.getcwd()) !ls !pip install keras==2.1.5 import keras print(keras.__version__) !nvidia-smi import os os.environ['KERAS_BACKEND']='tensorflow' import keras import numpy as np np.random.seed(2017) import os import glob import cv2 import math import pickle import pandas as pd import random from tqdm import tqdm from matplotlib import pyplot as plt from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D from keras.optimizers import SGD, Adam, Nadam from keras.utils import np_utils from keras.models import model_from_json from sklearn.metrics import log_loss<jupyter_output><empty_output><jupyter_text>### 图片读取函数 针对Xception模型进行图片读取和预处理。具体而言,使用OpenCV库从路径获取图片,尺寸改变为299*299*3。通道转换为RGB顺序。像素归一化。<jupyter_code># Load a preprocessed image def get_im_cv2(path, img_size): img = cv2.imread(path) img = np.array(img, dtype=np.float32) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # reduce size img = cv2.resize(img, (img_size, img_size)) # normalization img /= 127.5 img -= 1. #print (img[1:5, 1:5, 0]) return img<jupyter_output><empty_output><jupyter_text>### 图片读取函数,附加实时增强 针对Xception模型进行图片读取和预处理。具体而言,使用OpenCV库从路径获取图片。然后进行图片增强处理,绕图片中心点随机旋转-10到10度,并在横向上平移-50到50个pixel。这个范围是观察一些图片后,根据不同图片之间拍摄的角度和位置差异,决定在这个范围内做随机增强。目的是加强模型的泛化能力,防止过拟合。 和上一个函数一样,也针对Xception模型进行预处理。尺寸改变为299*299*3。通道转换为RGB顺序。像素归一化。<jupyter_code>from keras.preprocessing import image # image rotation def rotate(x, degree, row_axis=0, col_axis=1, channel_axis=2, fill_mode='wrap', cval=0.): theta = np.pi / 180 * degree rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) h, w = x.shape[row_axis], x.shape[col_axis] transform_matrix = image.transform_matrix_offset_center(rotation_matrix, h, w) x = image.apply_transform(x, transform_matrix, channel_axis, fill_mode, cval) return x # image shift def shift(x, wshift, hshift, row_axis=0, col_axis=1, channel_axis=2, fill_mode='wrap', cval=0.): h, w = x.shape[row_axis], x.shape[col_axis] #读取图片的高和宽 tx = hshift * h #高偏移大小,若不偏移可设为0,若向上偏移设为正数 ty = wshift * w #宽偏移大小,若不偏移可设为0,若向左偏移设为正数 translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) transform_matrix = translation_matrix x = image.apply_transform(x, transform_matrix, channel_axis, fill_mode, cval) return x # PCA def RGB_PCA(images): pixels = images.reshape(-1, images.shape[-1]) m = np.mean(pixels, axis=0) pixels -= m C = np.cov(pixels, rowvar=False) l, v = np.linalg.eig(C) idx = np.argsort(l)[::-1] v = v[:,idx] l = l[idx] #print (C.shape, len(l), len(v)) return l, v def RGB_variations(image, eig_val, eig_vec): a = 0.1 * np.random.randn(3) v = np.array([a[0]*eig_val[0], a[1]*eig_val[1], a[2]*eig_val[2]]) variation = np.dot(eig_vec, v) return image + variation # change HSV def randomHueSaturationValue(image, hue_shift_limit=(-10, 10), sat_shift_limit=(-75, 75), val_shift_limit=(-75, 75), u=0.5): if np.random.random() < u: img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) h, s ,v = img[:,:,0],img[:,:,1],img[:,:,2] hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1]) h = h + hue_shift sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1]) s = s + sat_shift val_shift = 
np.random.uniform(val_shift_limit[0], val_shift_limit[1]) v = v + val_shift img[:,:,0],img[:,:,1],img[:,:,2] = h, s ,v image = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) return image # Load a random augmented image def get_im_cv2_aug(path, img_size): img = cv2.imread(path) img = np.array(img, dtype=np.float32) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #random rotate deg = random.uniform(-10, 10) #random rotate limit img = rotate(img, deg) #random shift wshift = random.uniform(-0.1, 0.1) hshift = random.uniform(-0.1, 0.1) img = shift(img, wshift, hshift) # change HSV #img = randomHueSaturationValue(img) # PCA #img = img/255.0 #l, v = RGB_PCA(img) #img = RGB_variations(img, l, v) #img = img * 255.0 # reduce size img = cv2.resize(img, (img_size, img_size)) # normalization img /= 127.5 img -= 1. return img<jupyter_output><empty_output><jupyter_text>### 切分数据 按照司机的id来切分训练数据集。把driver_imgs_list分成两个列表。一个是训练列表,里面是所有训练集司机的图片文件名。另一个是验证列表,里面是所有验证集司机的图片文件名。然后把两个列表都随机打乱。 这里列表里面保存的都只是excel文件里的一行行数据(包括文件名,分类,司机id),而不是图片本身。等训练时通过生成器读取图片,这样节约内存。 另外还保存了一个类别字典,便于以后从同一类中抽取图片进行拼接。<jupyter_code># divide drivers unique_list_train = [] unique_list_valid = [] for i in range(13): unique_list_train.append('S'+str(i+1)) print(unique_list_train) for i in range(4): unique_list_valid.append('S'+str(i+14)) print(unique_list_valid) # unique_list_train = ['p002', 'p012', 'p014', 'p015', 'p026', 'p035', 'p039', 'p041', 'p042', 'p045', 'p047', 'p049', 'p050', 'p051', 'p052', 'p056', 'p061', 'p064', 'p066', 'p072', 'p075'] # unique_list_valid = ['p016', 'p021', 'p022', 'p024'] #print (unique_list_train, unique_list_valid) # get index: driver_id, class, image name index = os.path.join('.', 'driver_id_class_jpg.csv') # build the driver id dictionary and class dictionary f = open(index, 'r') id_dict = dict() class_dict = dict() lines = f.readlines() # print(lines[0]) # for line in lines[1:]: # arr = line.strip().split(',') # if arr[0] not in id_dict.keys(): # id_dict[arr[0]] = [line] # else: # id_dict[arr[0]].append(line) # if arr[1] not in class_dict.keys(): # class_dict[arr[1]] = [line] # else: # class_dict[arr[1]].append(line) # f.close() # split the train list and valid list by id train_list = [] valid_list = [] # for id in id_dict.keys(): # if id in unique_list_train: # train_list.extend(id_dict[id]) # elif id in unique_list_valid: # valid_list.extend(id_dict[id]) for line in lines[1:]: arr = line.strip().split(',') if arr[0] in unique_list_train: train_list.append(line) elif arr[0] in unique_list_valid: valid_list.append(line) f.close() random.shuffle(train_list) random.shuffle(valid_list) print (len(train_list), len(valid_list))<jupyter_output>['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13'] ['S14', 'S15', 'S16', 'S17'] 28517 8491 <jupyter_text>### 获取test set图片的列表 获取sample_submission.csv中所有测试图片的文件名,预测test set时使用。<jupyter_code>test_index = os.path.join('.', 'sample_submission.csv') f = open(test_index, 'r') lines = f.readlines() test_list = [] for line in lines[1:]: arr = line.strip().split(',') test_list.append(arr[0]) f.close() print (test_list[:5])<jupyter_output><empty_output><jupyter_text>### 转换为One Hot Encode标签 对分类标签进行One Hot Encode的函数如下。<jupyter_code># one hot encode the class label from sklearn import preprocessing lb = preprocessing.LabelBinarizer() class_id = [] for i in range(16): class_id.append('AC'+str(i+1)) lb.fit(class_id) def one_hot_encode(x): return lb.transform(x) t = one_hot_encode(class_id) print(t)<jupyter_output>[[1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] [0 0 0 0 0 0 0 0 1 0 0 0 0 
0 0 0] [0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0] [0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] [0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0] [0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0] [0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0] [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0] [0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0] [0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]] <jupyter_text>### 训练图片生成器 从训练列表中遍历。yield一个batch的训练图片及其标签。图片经过了实时增强。另外还有50%的概率随机选取另一张同类里的图片,将两张的左右各半边拼接在一起。这也是为了训练模型对分类的关键部位进行学习,而不是记住司机的样子<jupyter_code># define my train data generator def train_gen(batch_size): #random.shuffle(train_list) # 每一代都随机shuffle训练集 current = 0 while 1: x = [] y = [] while len(y) < batch_size: line = train_list[current] arr = line.strip().split(',') path1 = os.path.join('.', str(arr[0]), str(arr[1]),str(arr[2])) img = get_im_cv2_aug(path1, 299) # if random.random()>0.5: # line2 = random.choice(class_dict[arr[1]]) # bname = line2.strip().split(',')[2] # path2 = os.path.join('.', 'train', str(arr[1]), str(bname)) # img2 = get_im_cv2_aug(path2, 299) # left = img[:, :150, :] # right = img2[:, 150:, :] # img = np.concatenate((left, right), axis=1) x.append(img) label = one_hot_encode([str(arr[1])])[0] y.append(label) current += 1 if current >= len(train_list): current = 0 x = np.array(x) x = x.reshape(batch_size, img_size, img_size, 3) y = np.array(y, dtype = np.uint8) y = y.reshape(batch_size, 16) yield (x, y)<jupyter_output><empty_output><jupyter_text>### 验证图片生成器 从验证列表中遍历。yield一个batch的验证图片及其标签。为了体现模型训练后的拟合能力,图片没有进行实时增强。<jupyter_code># define my validation data generator def valid_gen(batch_size): current = 0 while 1: x = [] y = [] while len(y) < batch_size: line = valid_list[current] arr = line.strip().split(',') path = os.path.join('.', str(arr[0]), str(arr[1]),str(arr[2])) #print (path) img = get_im_cv2(path, 299) x.append(img) label = one_hot_encode([str(arr[1])])[0] y.append(label) current += 1 if current >= len(valid_list): current = 0 x = np.array(x) x = x.reshape(batch_size, img_size, img_size, 3) y = np.array(y, dtype = np.uint8) y = y.reshape(batch_size, 16) yield (x, y)<jupyter_output><empty_output><jupyter_text>### 构建模型 用keras构建模型。使用在ImageNet上预训练好的Xception模型,接上一个global average pooling层,dropout防止过拟合,最后一个全连接层输出10个类别的概率。在全连接层的权重上采用了L2正则化。锁定模型的前70层不更新权重.<jupyter_code>from keras import backend as K K.clear_session() # new model from keras.models import * from keras.layers import * from keras.applications import * from keras.preprocessing.image import * img_size = 299 x = Input((img_size, img_size, 3)) base_model = ResNet50(input_tensor = x, weights='imagenet', include_top=False) x = GlobalAveragePooling2D()(base_model.output) x = Dropout(0.5)(x) x = Dense(16, activation='softmax', kernel_regularizer=regularizers.l2(0.01))(x) model = Model(base_model.input, x) for i in range(53): model.layers[i].trainable = False model.summary() len(model.layers)<jupyter_output>WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:507: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3831: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:167: The name tf.get_default_session is deprecated. 
Please use tf.compat.v1.get_default_session instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:174: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:179: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. WARNING:tensorflow:From /usr/local/lib/python3.6/d[...]<jupyter_text>### 训练 进行模型训练。这里batch size为64。用了自适应优化器Nadam,使用schedule learning rate自动调整学习率的方法并在验证loss不下降时early stopping。一共训练5代,最后的验证集loss仍然有下降空间。steps per epoch设定为在一个epoch内所有训练图片被遍历1次.<jupyter_code>from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler model.compile(optimizer=Nadam(),loss='categorical_crossentropy',metrics=['categorical_accuracy']) def learning_rate(epoch): ini_lr = 0.002 lr = ini_lr * pow(10, -epoch) return lr cp = ModelCheckpoint(filepath="my_model1_MDAD_batchsize_32.h5", save_best_only=True) es = EarlyStopping() lrs = LearningRateScheduler(learning_rate) hist = model.fit_generator(train_gen(32), steps_per_epoch = 892, epochs = 5, workers=4, max_q_size=20, use_multiprocessing=True, validation_data = valid_gen(32), validation_steps = 266, callbacks=[cp, es, lrs])<jupyter_output>WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:757: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3008: The name tf.log is deprecated. Please use tf.math.log instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where
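The notebook builds `test_list` from `sample_submission.csv` and says it is used when predicting the test set, but the prediction step itself is not shown in this excerpt. Below is one possible sketch of that step, added for illustration only and not taken from the author's code: it reuses `get_im_cv2`, `lb` and the trained `model` defined above, and it assumes (hypothetically) that the test images live in a local `test` folder and that the submission wants one probability column per class in `lb.classes_` order.

```python
import os
import numpy as np
import pandas as pd

def predict_test_set(model, test_list, test_dir='test', batch_size=32, img_size=299):
    # Predict class probabilities for every test image, batch by batch.
    all_probs = []
    for start in range(0, len(test_list), batch_size):
        batch_files = test_list[start:start + batch_size]
        batch = [get_im_cv2(os.path.join('.', test_dir, fname), img_size)
                 for fname in batch_files]
        batch = np.array(batch).reshape(len(batch_files), img_size, img_size, 3)
        all_probs.append(model.predict(batch))
    all_probs = np.concatenate(all_probs, axis=0)

    # Assemble a submission frame: one row per image, one probability column per class.
    submission = pd.DataFrame(all_probs, columns=list(lb.classes_))
    submission.insert(0, 'img', test_list)
    submission.to_csv('submission.csv', index=False)
    return submission
```

After training, calling `predict_test_set(model, test_list)` would write `submission.csv` to the working directory.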
no_license
/MDAD/rgb/M1_dataset_MDAD.ipynb
perfectism13/learning_colab
10
<jupyter_start><jupyter_text># CS 109A/AC 209A/STAT 121A Data Science: Homework 4 **Harvard University** **Fall 2016** **Instructors: W. Pan, P. Protopapas, K. Rader** **Due Date: ** Wednesday, October 5th, 2016 at 11:59pmDownload the `IPython` notebook as well as the data file from Vocareum and complete locally. To submit your assignment, in Vocareum, upload (using the 'Upload' button on your Jupyter Dashboard) your solution to Vocareum as a single notebook with following file name format: `last_first_CourseNumber_HW4.ipynb` where `CourseNumber` is the course in which you're enrolled (CS 109a, Stats 121a, AC 209a). Submit your assignment in Vocareum using the 'Submit' button. **Avoid editing your file in Vocareum after uploading. If you need to make a change in a solution. Delete your old solution file from Vocareum and upload a new solution. Click submit only ONCE after verifying that you have uploaded the correct file. The assignment will CLOSE after you click the submit button.** Problems on homework assignments are equally weighted. The Challenge Question is required for AC 209A students and optional for all others. Student who complete the Challenge Problem as optional extra credit will receive +0.5% towards your final grade for each correct solution. Import libraries<jupyter_code>import numpy as np import pandas as pd from sklearn.linear_model import LinearRegression as Lin_Reg from sklearn.linear_model import Ridge as Ridge_Reg from sklearn.linear_model import Lasso as Lasso_Reg from statsmodels.regression.linear_model import OLS import sklearn.preprocessing as Preprocessing from sklearn.cross_validation import train_test_split from sklearn.cross_validation import KFold from sklearn.cross_validation import cross_val_score import itertools as it import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors import scipy as sp from itertools import combinations %matplotlib inline<jupyter_output><empty_output><jupyter_text># I'm a title ## Problem 0: Basic Information Fill in your basic information. ### Part (a): Your name[Hagmann, Tim]### Part (b): Course Number[CS 109a]### Part (c): Who did you work with?-**All data sets can be found in the ``datasets`` folder and are in comma separated value (CSV) format**## Problem 1: Variable selection and regularization The data set for this problem is provided in ``dataset_1.txt`` and contains 10 predictors and a response variable. ### Part (a): Analyze correlation among predictors - By visually inspecting the data set, do find that some of the predictors are correlated amongst themselves? - Compute the cofficient of correlation between each pair of predictors, and visualize the matrix of correlation coefficients using a heat map. Do the predictors fall naturally into groups based on the correlation values? - If you were asked to select a minimal subset of predictors based on the correlation information in order to build a good regression model, how many predictors will you pick, and which ones will you choose? 
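Part (a) also asks whether the predictors fall into natural groups based on their correlation values. As an illustrative aside (not part of the original assignment solution), one way to make that question concrete is to threshold the absolute correlation matrix and read off the transitively connected groups; the `0.8` cutoff and the helper name `correlation_groups` below are arbitrary choices, and `x` is the predictor matrix loaded in the next cell.

```python
import numpy as np

def correlation_groups(x, threshold=0.8):
    # Group predictors that are transitively linked by |correlation| > threshold.
    corr = np.abs(np.corrcoef(x.T))
    unassigned = set(range(corr.shape[0]))
    groups = []
    while unassigned:
        seed = unassigned.pop()
        group, frontier = {seed}, {seed}
        while frontier:
            nxt = {j for i in frontier for j in unassigned if corr[i, j] > threshold}
            unassigned -= nxt
            group |= nxt
            frontier = nxt
        groups.append(sorted(group))
    return groups

# Example (after x is loaded below): correlation_groups(x, threshold=0.8)
```

Choosing one representative per group, plus any predictors left ungrouped, gives a candidate minimal subset to compare against the exhaustive and step-wise selections later in the notebook.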
<jupyter_code># Load data data = np.loadtxt('datasets/dataset_1.txt', delimiter=',', skiprows=1) # Split predictors and response x = data[:, :-1] y = data[:, -1] df = pd.DataFrame(data) df.head() # Compute matrix of correlation coefficients corr_matrix = np.corrcoef(x.T) pd.DataFrame(corr_matrix) # Display heat map fig, ax = plt.subplots(1, 1, figsize=(6, 6)) ax.pcolor(corr_matrix) ax.set_title('Heatmap of correlation matrix') plt.show()<jupyter_output><empty_output><jupyter_text> ** Comment: ** pc It also shows that there are natural groups amongst them. There is a correlated group among the variables 0 to 2 and 3 to 6. When building a good regression model with a subset i would pick on of the variables 0 to 2, one 3 to 6 and 7, 8 and 9, i.e., 5 variables. ### Part (b): Selecting minimal subset of predictors - Apply the variable selection methods discussed in class to choose a minimal subset of predictors that yield high prediction accuracy: - Exhaustive search - Step-wise forward selection **or** Step-wise backward selection &emsp;&nbsp;&nbsp; In each method, use the Bayesian Information Criterion (BIC) to choose the subset size. - Do the chosen subsets match the ones you picked using the correlation matrix you had visualized in Part (a)? **Note**: You may use the `statsmodels`'s `OLS` module to fit a linear regression model and evaluate BIC. You may **not** use library functions that implement variable selection.<jupyter_code>### Best Subset Selection min_bic = 1e10 # set some initial large value for min BIC score best_subset = [] # best subset of predictors # Create all possible subsets of the set of 10 predictors predictor_set = set(range(10)) # predictor set = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} # Repeat for every possible size of subset for size_k in range(10): # Create all possible subsets of size 'size', # using the 'combination' function from the 'itertools' library subsets_of_size_k = it.combinations(predictor_set, size_k + 1) max_r_squared = -1e10 # set some initial small value for max R^2 score best_k_subset = [] # best subset of predictors of size k # Iterate over all subsets of our predictor set for predictor_subset in subsets_of_size_k: # Use only a subset of predictors in the training data x_subset = x[:, predictor_subset] # Fit and evaluate R^2 model = OLS(y, x_subset) results = model.fit() r_squared = results.rsquared # Update max R^2 and best predictor subset of size k # If current predictor subset has a higher R^2 score than that of the best subset # we've found so far, remember the current predictor subset as the best! if(r_squared > max_r_squared): max_r_squared = r_squared best_k_subset = predictor_subset[:] # Use only the best subset of size k for the predictors x_subset = x[:, best_k_subset] # Fit and evaluate BIC of the best subset of size k model = OLS(y, x_subset) results = model.fit() bic = results.bic # Update minimum BIC and best predictor subset # If current predictor has a lower BIC score than that of the best subset # we've found so far, remember the current predictor as the best! if(bic < min_bic): min_bic = bic best_subset = best_k_subset[:] print('Best subset by exhaustive search:') print sorted(best_subset) ### Step-wise Forward Selection d = x.shape[1] # total no. 
of predictors # Keep track of current set of chosen predictors, and the remaining set of predictors current_predictors = [] remaining_predictors = range(d) # Set some initial large value for min BIC score for all possible subsets global_min_bic = 1e10 # Keep track of the best subset of predictors best_subset = [] # Iterate over all possible subset sizes, 0 predictors to d predictors for size in range(d): max_r_squared = -1e10 # set some initial small value for max R^2 best_predictor = -1 # set some throwaway initial number for the best predictor to add bic_with_best_predictor = 1e10 # set some initial large value for BIC score # Iterate over all remaining predictors to find best predictor to add for i in remaining_predictors: # Make copy of current set of predictors temp = current_predictors[:] # Add predictor 'i' temp.append(i) # Use only a subset of predictors in the training data x_subset = x[:, temp] # Fit and evaluate R^2 model = OLS(y, x_subset) results = model.fit() r_squared = results.rsquared # Check if we get a higher R^2 value than than current max R^2, if so, update if(r_squared > max_r_squared): max_r_squared = r_squared best_predictor = i bic_with_best_predictor = results.bic # Remove best predictor from remaining list, and add best predictor to current list remaining_predictors.remove(best_predictor) current_predictors.append(best_predictor) # Check if BIC for with the predictor we just added is lower than # the global minimum across all subset of predictors if(bic_with_best_predictor < global_min_bic): best_subset = current_predictors[:] global_min_bic = bic_with_best_predictor print 'Step-wise forward subset selection:' print sorted(best_subset) # add 1 as indices start from 0 ### Step-wise Backward Selection d = x.shape[1] # total no. 
of predictors # Keep track of current set of chosen predictors current_predictors = range(d) # First, fit and evaluate BIC using all 'd' number of predictors model = OLS(y, x) results = model.fit() bic_all = results.bic # Set the minimum BIC score, initially, to the BIC score using all 'd' predictors global_min_bic = bic_all # Keep track of the best subset of predictors best_subset = [] # Iterate over all possible subset sizes, d predictors to 1 predictor for size in range(d - 1, 1, -1): # stop before 0 to avoid choosing an empty set of predictors max_r_squared = -1e10 # set some initial small value for max R^2 worst_predictor = -1 # set some throwaway initial number for the worst predictor to remove bic_without_worst_predictor = 1e10 # set some initial large value for min BIC score # Iterate over current set of predictors (for potential elimination) for i in current_predictors: # Create copy of current predictors, and remove predictor 'i' temp = current_predictors[:] temp.remove(i) # Use only a subset of predictors in the training data x_subset = x[:, temp] # Fit and evaluate R^2 model = OLS(y, x_subset) results = model.fit() r_squared = results.rsquared # Check if we get a higher R^2 value than than current max R^2, if so, update if(r_squared > max_r_squared): max_r_squared = r_squared worst_predictor = i bic_without_worst_predictor = results.bic # Remove worst predictor from current set of predictors current_predictors.remove(worst_predictor) # Check if BIC for the predictor we just removed is lower than # the global minimum across all subset of predictors if(bic_without_worst_predictor < global_min_bic): best_subset = current_predictors[:] global_min_bic = bic_without_worst_predictor print 'Step-wise backward subset selection:' print sorted(best_subset)<jupyter_output>Step-wise backward subset selection: [2, 5, 7, 8, 9] <jupyter_text> ** Comment: ** The subset selection shows that similar variables as the ones picked by me are chosen, i.e., [2, 5, 7, 8, 9] or [0, 5, 7, 8, 9]. ### Part (c): Apply Lasso and Ridge regression - Apply Lasso regression with regularization parameter $\lambda = 0.01$ and fit a regression model. - Identify the predictors that are assigned non-zero coefficients. Do these correspond to the correlation matrix in Part (a)? - Apply Ridge regression with regularization parameter $\lambda = 0.01$ and fit a regression model. - Is there a difference between the model parameters you obtain different and those obtained from Lasso regression? If so, explain why. - Identify the predictors that are assigned non-zero coefficients. Do these correspond to the correlation matrix in Part (a)? - Is there anything peculiar that you observe about the coefficients Ridge regression assigns to the first three predictors? Do you observe the same with Lasso regression? Give an explanation for your observation. **Note**: You may use the `statsmodels` or `sklearn` to perform Lasso and Ridge regression.<jupyter_code># Lasso regression reg = Lasso_Reg(alpha = 0.01) reg.fit(x, y) coefficients = reg.coef_ print 'Lasso:' print 'Coefficients:', coefficients print 'Predictors with non-zero coefficients:', [i for i, item in enumerate(coefficients) if abs(item) > 0]<jupyter_output>Lasso: Coefficients: [ 0.02717417 0. 0. -0. -0.02532806 -0. -0. 
0.04397321 -0.40612185 -0.22260474] Predictors with non-zero coefficients: [0, 4, 7, 8, 9] <jupyter_text> ** Comment: ** The predictors correspond with the correlation matrix, i.e., [0, 4, 7, 8, 9] <jupyter_code># Ridge regression: Fit and evaluate reg = Ridge_Reg(alpha = 0.01) x[:,1] = x[:,0] x[:,2] = x[:,0] reg.fit(x, y) coefficients = reg.coef_ print 'Ridge:' print 'Coefficients:', coefficients print 'Predictors with non-zero coefficients:', [i for i, item in enumerate(coefficients) if abs(item) > 0]<jupyter_output>Ridge: Coefficients: [ 0.04353543 0.04353543 0.04353543 0.55217415 -0.19706852 -0.61421737 0.30484213 0.18742866 -0.50083242 -0.35908145] Predictors with non-zero coefficients: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] <jupyter_text> ** Comment: ** As is to be expected, the ridge regression selects all variables, i.e., [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]. That means it doesn't correspond with the correlation matrix. The ridge regression assigns exactly the same value to the variables 0 to 2. This is because there is a prefect i.e., 1:1 correlation between those variables. The lasso regression doesn't show the same behavior, as there is only on of those variables selected. ## Problem 2: Cross-validation and Bootstrapping In this problem, you will work with an expanded version of the automobile pricing data set you analyzed in Homework 3. The data set is contained ``dataset_2.txt``, with 26 attribues (i.e. predictors) for each automobile and corresponding prices. ### Part(a): Encode categorical attributes and fill missing values Identify the categorical attributes in the data. Replace their values with the one-hot binary encoding. You may do this using the `get_dummies()` function in `pandas`. If you do this task correctly, you should get a total of 69 predictors after the encoding. <jupyter_code>## Load data data = pd.read_csv('datasets/dataset_2.txt') # Split predictors and response y = data.iloc[:, -1] x = data.iloc[:, :-1] ## Transformations # Transform symboling into categorical variable x['symboling'] = x['symboling'].astype(object) # Get categorical values categorical_bolean = (x.dtypes == "object").values x_numeric = x.loc[:, ~categorical_bolean] x_categorical = x.loc[:, categorical_bolean] x_dummies = pd.get_dummies(x_categorical) ## Scale the numerical x variables x_numeric = x_numeric / x_numeric.std(axis=0) ## Bind data together x = pd.concat([x_numeric, x_dummies], axis=1) print x.shape <jupyter_output>(205, 69) <jupyter_text>### Part (b): Apply regular linear regression - Split the data set into train and test sets, with the first 25% of the data for training and the remaining for testing. - Use regular linear regression to fit a model to the training set and evaluate the R^2 score of the fitted model on both the training and test sets. What do you observe about these values? - You had seen in class that the R^2 value of a least-squares fit to a data set would lie between 0 and 1. Is this true for the test R^2 values reported above? If not, give a reason for why this is the case. - Is there a need for regularization while fitting a linear model to this data set? 
**Note**: You may use the `statsmodels` or `sklearn` to fit a linear regression model and evaluate the fits.<jupyter_code>## Split the dataset np.random.seed(856) # Set seed x_train, x_test = train_test_split(x, test_size = 0.25) ## Split the variables y_train = y.iloc[x_train.index.values] y_test = y.iloc[x_test.index.values] x_train = x.iloc[x_train.index.values, :] x_test = x.iloc[x_test.index.values, :] ## Transform to numpy y_train = y_train.values y_test = y_test.values x_train = x_train.values x_test = x_test.values # First, let's do plain OLS reg = Lin_Reg() reg.fit(x_train, y_train) train_r_squared_plain = reg.score(x_train, y_train) test_r_squared_plain = reg.score(x_test, y_test) print 'Plain Regression: R^2 score on training set', train_r_squared_plain print 'Plain Regression: R^2 score on test set', test_r_squared_plain<jupyter_output>Plain Regression: R^2 score on training set 0.990955195397 Plain Regression: R^2 score on test set 0.958059163439 <jupyter_text> ** Comment: ** The R^2 values of the model are very high, i.e., 99% in the training an 95% in the test set. That is a near perfect fit. However, as is to be expected, the training set overfits the data a bit. Which can be noticed by the smaller test R^2. As it should be, both values lie between 0 and 1. In this case of the linear regression it is not necessary to regularize the data. ### Part (c): Apply Ridge regression - Apply Ridge regression on the training set for different values of the regularization parameter $\lambda$ in the range $\{10^{-7}, 10^{-6}, \ldots, 10^7\}$. Evaluate the R^2 score for the models you obtain on both the train and test sets. Plot both values as a function of $\lambda$. - Explain the relationship between the regularization parameter and the training and test R^2 scores. - How does the best test R^2 value obtained using Ridge regression compare with that of plain linear regression? Explain. 
**Note**: You may use the `statsmodels` or `sklearn` to fit a ridge regression model and evaluate the fits.<jupyter_code># Ridge regression: Fit and evaluate reg = Ridge_Reg(alpha = 1.0) reg.fit(x_train, y_train) coefficients = reg.coef_ predictors = [i for i, item in enumerate(coefficients) if abs(item) > 0] print 'Ridge:' print 'Coefficients:', coefficients print 'Selected predictors:',predictors print 'Ridge Regression: R^2 score on training set', reg.score(x_train,y_train) print 'Ridge Regression: R^2 score on test set', reg.score(x_test,y_test) # Run for different values of lambda lambda_min = -7 lambda_max = 7 num_lambdas = 15 num_predictors = x.shape[1] lambdas= np.linspace(lambda_min,lambda_max, num_lambdas) train_r_squared = np.zeros(num_lambdas) test_r_squared = np.zeros(num_lambdas) coeff_a = np.zeros((num_lambdas, num_predictors)) for ind, i in enumerate(lambdas): # Fit ridge regression on train set reg = Ridge_Reg(alpha = 10**i) reg.fit(x_train, y_train) coeff_a[ind,:] = reg.coef_ # Evaluate train & test performance train_r_squared[ind] = reg.score(x_train, y_train) test_r_squared[ind] = reg.score(x_test, y_test) # Plotting plt.figure(figsize=(18, 8)) plt.plot(train_r_squared, 'bo-', label=r'$R^2$ Training set', color="darkblue", alpha=0.6, linewidth=3) plt.plot(test_r_squared, 'bo-', label=r'$R^2$ Test set', color="darkred", alpha=0.6, linewidth=3) plt.xlabel('Lamda value'); plt.ylabel(r'$R^2$') plt.xlim(0, 14) plt.title(r'Evaluate ridge regression $R^2$ with different lamdas') plt.legend(loc='best') plt.grid() <jupyter_output><empty_output><jupyter_text> ** Comment: ** The best R^2 in the test set is reached when the lamda value is 0, i.e., when a standard linear regression is performed. That means that with this particular data set the variables shouldn't be scaled back. A lamda of zero means a OLS is preformerd while when the lamda goes to infinity the coefficients go to zero. ### Part (d): Tune regularization parameter using cross-validation and bootstrapping - Evaluate the performance of the Ridge regression for different regularization parameters $\lambda$ using 5-fold cross validation **or** bootstrapping on the training set. - Plot the cross-validation (CV) or bootstrapping R^2 score as a function of $\lambda$. - How closely does the CV score or bootstrapping score match the R^2 score on the test set? Does the model with lowest CV score or bootstrapping score correspond to the one with maximum R^2 on the test set? - Does the model chosen by CV or bootstrapping perform better than plain linear regression? **Note**: You may use the `statsmodels` or `sklearn` to fit a linear regression model and evaluate the fits. You may also use `kFold` from `sklearn.cross_validation`. 
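The solution below tunes $\lambda$ with 5-fold cross-validation; the bootstrapping alternative mentioned in the prompt can be sketched as follows. This is illustrative only: it assumes the `x_train`, `y_train` arrays and the `Ridge_Reg` import from the cells above, and the number of bootstrap replicates is an arbitrary choice.

```python
import numpy as np

def bootstrap_r2(alpha, x_train, y_train, n_boot=100, seed=123):
    """Average out-of-bag R^2 of Ridge(alpha) over bootstrap resamples of the training set."""
    rng = np.random.RandomState(seed)
    n = x_train.shape[0]
    scores = []
    for _ in range(n_boot):
        idx = rng.randint(0, n, n)                # rows drawn with replacement
        oob = np.setdiff1d(np.arange(n), idx)     # rows left out of this resample
        if len(oob) == 0:
            continue
        reg = Ridge_Reg(alpha=alpha)
        reg.fit(x_train[idx], y_train[idx])
        scores.append(reg.score(x_train[oob], y_train[oob]))
    return np.mean(scores)

# One bootstrap score per lambda, on the same grid as the CV code below.
boot_scores = [bootstrap_r2(10.0 ** ell, x_train, y_train)
               for ell in np.linspace(-7, 7, 15)]
```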
<jupyter_code># Run for different values of lambda lambda_min = -7 lambda_max = 7 num_lambdas = 15 num_predictors = x.shape[1] lambdas= np.linspace(lambda_min,lambda_max, num_lambdas) train_r_squared = np.zeros(num_lambdas) test_r_squared = np.zeros(num_lambdas) kfold = KFold(n=len(x), n_folds=5, random_state=123) for ind, i in enumerate(lambdas): # Fit ridge regression on train set reg = Ridge_Reg(alpha = 10**i) reg.fit(x_train, y_train) results = cross_val_score(reg, x, y, cv=kfold, scoring="r2") # Evaluate train & test performance train_r_squared[ind] = results.mean() test_r_squared[ind] = reg.score(x_test, y_test) # Plotting plt.figure(figsize=(18, 8)) plt.plot(train_r_squared, 'bo-', label=r'$R^2$ Training set', color="darkblue", alpha=0.6, linewidth=3) plt.plot(test_r_squared, 'bo-', label=r'$R^2$ Test set', color="darkred", alpha=0.6, linewidth=3) plt.xlabel('Lamda value'); plt.ylabel(r'$R^2$') plt.xlim(0, 14) plt.title(r'Evaluate 5-fold cv with different lamdas') plt.legend(loc='best') plt.grid() <jupyter_output><empty_output><jupyter_text> ** Comment: ** The $R^2$ for the test set from the cross validation follows a similar shape to the $R^2$ score on the test set from the other regressions. The maximum $R^2$ from cross validation is achieved for $\lambda = 0$ and from ridge regression prediction on the test set, the maximum value of $R^2$ is also achieved with $\lambda = 0$. That mean both the CV and the ridge regression select the same model as the OLS. The lowest MSE is achieved for $\lambda = 0$ which corresponds to the model selected by the R^2 measure. As all the models tend to go to lamda = 0 they perform the same as the OLS regression. ## Problem 3: Ridge regression *via* ordinary least-squares regression We present an approach to implement Ridge regression using oridinary least-squares regression. Given a matrix of responses $\mathbf{X} \in \mathbb{R}^{n\times p}$ and response vector $\mathbf{y} \in \mathbb{R}^{n}$, one can implement Ridge regression with regularization parameter $\lambda$ as follows: - Augment the matrix of predictors $\mathbf{X}$ with $p$ new rows containing the scaled identity matrix $\sqrt{\lambda}\mathbf{I} \in \mathbb{R}^{p \times p}$, i.e. $$\overline{\mathbf{X}} \,=\, \begin{bmatrix} X_{11} & \ldots & X_{1p}\\ \vdots & \ddots & \vdots\\ X_{n1} & \ldots & X_{np}\\ \sqrt{\lambda} & \ldots & 0\\ \vdots & \ddots & \vdots\\ 0 & \ldots & \sqrt{\lambda} \end{bmatrix} \,\in\, \mathbb{R}^{(n+p)\times p} . $$ - Augment the response vector $\mathbf{y}$ with a column of $p$ zeros, i.e. $$ \overline{\mathbf{y}} \,=\, \begin{bmatrix} y_{1}\\ \vdots\\ y_{n}\\ 0\\ \vdots\\ 0 \end{bmatrix} \,\in\, \mathbb{R}^{n+p}. $$ - Apply ordinary least-squares regression on the augmented data set $(\overline{\mathbf{X}}, \overline{\mathbf{y}})$. ### Part (a): Show the proposed approach implements Ridge regression Show that the approach proposed above implements Ridge regression with parameter $\lambda$. ** Solution: ** The ridge regression takes the following form: $$L(\beta) = (Y-X\beta)^T(Y-X\beta) + \lambda\beta^T\beta $$ The normal equations for ridge regression is: $$(X^T\,X + \lambda I)\beta = X^T Y $$ so $\beta$ is solved by using the following matrix inverse: $$\beta = (X^T\,X + \lambda I)^{-1} X^T Y $$ X and Y can be enhanced so that the normal equation has the same form as for ordinary least squeres. That means including the $\lambda\beta^2$ into an $\overline{\mathbf{X}}$ matrix and $\overline{\mathbf{y}}$ vector. 
As $\lambda >0$ the square root $\sqrt{\lambda}$ will be positive so the matrix X (an n×p matrix) can be augemented by appending the p x p matrix $\sqrt{\lambda}I$, and the vector Y can be extended by appending a column of p zeros. The product $(\overline{\mathbf{y}}-\overline{\mathbf{X}}\beta)^T(\overline{\mathbf{y}}-\overline{\mathbf{X}}\beta)$ adds an additional p terms to the objective function of ordinary least squares of the form: $$(0 - \sqrt{\lambda}\beta_i)^2 = \lambda\beta^T\beta$$ which equals to the additional term in the function for the ridge regression. That means that the normal equation for $\beta$ using the ridge regression can be rewritten as $$(Y-X\beta)^T(Y-X\beta) + \lambda\beta^2 = (\overline{\mathbf{y}}-\overline{\mathbf{X}}\beta)^T(\overline{\mathbf{y}}-\overline{\mathbf{X}}\beta)$$ From the right the normal equation can then be rewritten as: $${\overline{\mathbf{X}}}^T\,\overline{\mathbf{X}}\beta = {\overline{\mathbf{X}}}^T \overline{\mathbf{y}} $$ which then can be solved using OLS. The function for ridge regression takes the form: $$F(\beta) = (Y-X\beta)^T(Y-X\beta) + \lambda\beta^T\beta $$ The function for OLS regression with $(\overline{\mathbf{X}}, \overline{\mathbf{y}})$ takes the form: $$F(\beta)= (\overline{\mathbf{y}} - \overline{\mathbf{X}})^T(\overline{\mathbf{y}} - \overline{\mathbf{X}})$$ $\overline{\mathbf{y}}^T\overline{\mathbf{y}}=y^Ty$, as the bottom p terms of $\overline{\mathbf{y}}$ are all zero. $\overline{\mathbf{y}}^T\overline{\mathbf{X}}=y^TX$, as the bottom p terms of $\overline{\mathbf{y}}$ are all zero. $\overline{\mathbf{X}}^T\overline{\mathbf{X}}=\Big( X^T \ \sqrt{\lambda}I_p \Big) \begin{pmatrix}X \sqrt{\lambda}I_p\end{pmatrix}$. That means, $$ L_{OLS}({\beta}) = (\overline{\mathbf{y}}-\overline{\mathbf{X}}\beta)^T(\overline{\mathbf{y}} - \overline{\mathbf{X}}\beta)$$ $$ = \overline{\mathbf{y}}^T\overline{\mathbf{y}} -2\overline{\mathbf{y}}^T\overline{\mathbf{X}} \beta + \beta^T \overline{\mathbf{X}}^T\overline{\mathbf{X}} \beta $$ $$ = y^Ty -2y^TX + \beta^T (X^T X + \lambda I_p) \beta $$ $$ = y^Ty -2y^TX + \beta^T(X^T X)\beta + \lambda \beta^T\beta $$ $$ = (y-X\beta)^T(y-X\beta) + \lambda\beta^T\beta $$ $$ = L_{RIDGE}(\beta) $$ That shows that the approach proposed implements Ridge regression with parameter $\lambda$. ### Part (b): Debug our implementation of ridge regression You're a grader for CS109A, the following is an implemention of Ridge regression (via the above approach) submitted by a student. The dataset is ``dataset_3.txt``. The regression model is fitted to a training set, and the R^2 scores of the fitted model on the training and test sets are plotted as a function of the regularization parameter. 
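As a reference point when checking the submission, here is a minimal, self-contained check of the Part (a) construction on synthetic data: ordinary least squares on the augmented system reproduces sklearn's ridge coefficients. The data sizes, seed and regularization value are arbitrary and not part of the assignment.

```python
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
n, p, lam = 50, 5, 0.7
X = rng.randn(n, p)
y = np.dot(X, rng.randn(p)) + 0.1 * rng.randn(n)

# Augment: stack sqrt(lambda) * I under X and p zeros under y.
X_bar = np.vstack([X, np.sqrt(lam) * np.eye(p)])
y_bar = np.concatenate([y, np.zeros(p)])

# Ordinary least squares on the augmented system ...
beta_ols = np.linalg.lstsq(X_bar, y_bar, rcond=None)[0]

# ... matches ridge regression (without an intercept) at the same lambda.
beta_ridge = Ridge(alpha=lam, fit_intercept=False).fit(X, y).coef_
print(np.allclose(beta_ols, beta_ridge))   # True
```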
Grade this solution according to the following rubric (each category is equally weighted): - correctness - interpretation (if applicable) - code/algorithm design - presentation In addition to providing an holistic grade (between 0 to 5), provide a corrected version of this code that is submission quality.<jupyter_code># Fit def ridge(x_train, y_train, reg_param): n=np.shape(x_train)[0] x_train=np.concatenate((x_train,reg_param*np.identity(n)),axis=1) y_train_=np.zeros((n+np.shape(x_train)[1],1)) for c in range(n): y_train_[c]= y_train[c] import sklearn model = sklearn.linear_model.LinearRegression() model.fit(x_train,y_train.reshape(-1,1)) return model # Score def score(m,x_test,y_test, reg_param): n=np.shape(x_train)[0] x_test=np.concatenate((x_test,reg_param*np.identity(n)),axis=1) y_test_=np.zeros((n+np.shape(x_test)[1],1)) for c in range(n): y_test_[c]= y_test[c] return m.score(x_test,y_test.reshape(-1,1)) # Load data = np.loadtxt('datasets/dataset_3.txt', delimiter=',') n = data.shape[0] n = int(np.round(n*0.5)) x_train = data[0:n,0:100] y_train = data[0:n,100] x_test = data[n:2*n,0:100] y_test = data[n:2*n,100] # Params a=np.zeros(5) for i in range(-2,2): a[i+2]=10**i # Iterate rstr =np.zeros(5) rsts =np.zeros(5) for j in range(0,5): m =ridge(x_train,y_train,a[i]) rstr[j]=score(m,x_train,y_train,a[j]) rsts[i]=score(m,x_test,y_test,a[i]) # Plot plt.plot(a,rstr) plt.plot(a,rsts)<jupyter_output><empty_output><jupyter_text> ** Corrected version: ** <jupyter_code># Load packages from sklearn.linear_model import LinearRegression as Lin_Reg # Functions: # Fit def ridge(x_train, y_train, reg_param): n, p = np.shape(x_train) # x is an n x p matrix x_train = np.concatenate((x_train, np.sqrt(10.0**reg_param)*np.identity(p)), axis=0) y_train = np.concatenate((y_train, np.zeros(p)), axis=0) model = Lin_Reg() model.fit(x_train, y_train) return model # Score def score(m, x_test,y_test, reg_param): n, p = np.shape(x_train) # x is an n x p matrix x_test = np.concatenate((x_test, np.sqrt(10.0**reg_param)*np.identity(p)), axis=0) y_test = np.concatenate((y_test, np.zeros(p)), axis=0) return m.score(x_test, y_test) # Load data = np.loadtxt('datasets/dataset_3.txt', delimiter=',') # Extract out x and y x_df = data[:, 0:-1] y_df = data[:, -1] n = data.shape[0] n_train = int(np.round(n*0.5)) # shuffle data and split into test and train sets indices = np.random.permutation(range(n)) train_indices = indices[0:n_train] test_indices = indices[n_train:] x_standardised = x_df x_train = x_standardised[train_indices,:] y_train = y_df[train_indices] x_test = x_standardised[test_indices,:] y_test = y_df[test_indices] # Generate an array of lambda values lambdas=np.linspace(-2,2, 100) num_lambdas = len(lambdas) # Initialise empty arrays for storing r2 values r2_train =np.zeros(num_lambdas) r2_test =np.zeros(num_lambdas) # Iterate over the values of lambda for i in range(num_lambdas): # fit a ridge regression model on the training set model = ridge(x_train,y_train,lambdas[i]) # predict on the test r2_train[i]=score(model, x_train,y_train,lambdas[i]) r2_test[i]=score(model, x_test,y_test, lambdas[i]) # Ploting fig, ax = plt.subplots(1, 1, figsize=(18, 8)) ax.semilogx(10.0**lambdas, (r2_train), c='b', label='Ridge: Train') ax.semilogx(10.0**lambdas, (r2_test), c='r', label='Ridge: Test') ax.set_xlabel('Regularization parameter $\lambda$') ax.set_ylabel(r'$R^2$ score') ax.legend(loc = 'best') plt.grid() <jupyter_output><empty_output><jupyter_text> ** Comment: ** The best R^2 in the test set is reached with 0.79259, i.e., 
when the lamda value is slightly smaller than 10'0. ** Grade: ** Correctness -The student made a couple of errors in the code. Interpretation (if applicable) -The student didn't add interpretation. Code/algorithm design -The student used poor labelling for the variables -The student didn't really comment in the code Presentation -No axes labels in the plots and no legend Grade 3.5/5 ## Challenge Problem: Predicting Outcome of a Fund-raising Campaign You are provided a data set containing details of mail sent to 95,412 potential donors for a fund-raising campaign of a not-for-profit organization. This data set also contains the amount donated by each donor. The task is to build a model that can estimate the amount that a donor would donate using his/her attributes. The data is contained in the file `dataset_4.txt`. Each row contains 376 attributes for a donor, followed by the donation amount. **Note**: For additional information about the attributes used, please look up the file `dataset_4_description.txt`. This files also contains details of attributes that have been omitted from the data set. ### Part (a): Fit regression model Build a suitable model to predict the donation amount. How good is your model? ### Part (b): Evaluate the total profit of the fitted model Suppose you are told that the cost of mailing the donor is \$7. Use your model to maximize profit. Implement, explain and rigorously justify your strategy. How does your strategry compare with blanket mailing everyone. ### Part (c): Further Discussion In hindsight, thoroughly discuss the appropriatenes of using a regression model for this dataset (you must at least address the suitability with respect to profit maximization and model assumptions). Rigorously justify your reasoning. <jupyter_code>## Function def Ridge_Regression(x_test, y_test, x_train, y_train, min_el, max_el): # Create lambdas spaced between min and max ells = np.array(range(min_el, max_el+1)) num_lambdas = len(ells) num_datapoints, num_predictors = np.shape(x_train) # Initialise empty array train_r_squared = np.zeros(num_lambdas) test_r_squared = np.zeros(num_lambdas) coeff_a =np.zeros((num_lambdas, num_predictors)) test_mse = np.zeros(num_lambdas) # Iterate through the lambdas for i, ell in enumerate(ells): reg = Ridge_Reg(alpha=10**ell) reg.fit(x_train, y_train) # calculate MSE test_mse[i] = np.sum((y_test-reg.predict(x_test))**2)/float(len(y_test)) test_r_squared[i] = reg.score(x_test, y_test) train_r_squared[i] = reg.score(x_train, y_train) print 'Lambda = ', 10**ell, ' Test R2 = ', reg.score(x_test, y_test) # save the coefficients for plotting coeff_a[i,:] = reg.coef_ return train_r_squared, test_r_squared, coeff_a, ells, test_mse def Lasso_Regression(x_test, y_test, x_train, y_train, min_el, max_el): # Create a vector of lambdas ells = np.linspace(min_el, max_el, 100) num_lambdas = len(ells) num_predictors = np.shape(x_train)[1] # Empty arrays to store r2 values and coefficients train_r_squared = np.zeros(num_lambdas) test_r_squared = np.zeros(num_lambdas) coeff_a = np.zeros((num_lambdas, num_predictors)) test_mse = np.zeros(num_lambdas) # Loop for i, ell in enumerate(ells): # Ridge regression reg = Lasso_Reg(alpha=10**ell) reg.fit(x_train, y_train) test_mse[i] = np.sum((y_test-reg.predict(x_test))**2)/float(len(y_test)) # Calculate R2 r2_test = reg.score(x_test, y_test) r2_train = reg.score(x_train, y_train) test_r_squared[i] = r2_test train_r_squared[i] = r2_train coeff_a[i,:] = reg.coef_ return train_r_squared, test_r_squared, coeff_a, ells, test_mse # 
Load the dataset data4 = pd.read_csv('datasets/dataset_4.txt', sep=',') # Extract the predictors and the response df_x4 = data4.iloc[:, 0:-1] df_y4 = data4.iloc[:,-1] print 'Shape before expansion: ', df_x4.shape num_predictors4 = np.shape(df_x4)[1] data4_expanded = pd.DataFrame({}) # Loop for column in df_x4.columns: # For categorical variables, is the number of unique items < 20 or data type is an object if (len(df_x4[column].unique()) < 20 or (df_x4[column].dtype == np.dtype('object'))): # apply one-hot encoding encoded_col = pd.get_dummies(df_x4[column]) data4_expanded = pd.concat([data4_expanded, encoded_col], axis=1) else: data4_expanded = pd.concat([data4_expanded, df_x4[column]], axis=1) data4_expanded.head() # Extract the predictors and the response x4 = data4_expanded.iloc[:, 0:-1].values y4 = df_y4.values.reshape(len(df_y4.values), 1) # Split into train and test set n = x4.shape[0] n_train = int(np.round(n*0.7)) indices = np.random.permutation(range(n)) train_indices = indices[0:n_train] test_indices = indices[n_train:] # The first 25% are in the training set, the rest is in the test set (assuming the data has been ran. shuffeled) x4_train = x4[train_indices,:] y4_train = y4[train_indices] x4_test = x4[test_indices,:] y4_test = y4[test_indices] # OLS regression reg4 = Lin_Reg() reg4.fit(x4_train, y4_train) train_rsquared4 = reg4.score(x4_train, y4_train) test_rsquared4 = reg4.score(x4_test, y4_test) # Print results print 'Shape of Training set', x4_train.shape print 'Shape of Testing set', x4_test.shape print 'Plain Regression: R^2 score on training set', train_rsquared4 print 'Plain Regression: R^2 score on test set', test_rsquared4<jupyter_output>Shape of Training set (6250L, 3159L) Shape of Testing set (2678L, 3159L) Plain Regression: R^2 score on training set 0.543766085401 Plain Regression: R^2 score on test set -2.99313600933e+14 <jupyter_text> ** Comment: ** The OLS Regression performs quite bad on the test data. This is because the number of predictors is very large, i.e., the same order as the number of data points. Furthermore, on the test set, the number of predictors is larger than the number of samples. ### Ridge Regression<jupyter_code># Ridge regression ridge_reg4 = Ridge_Reg(alpha=1.0) ridge_reg4.fit(x4_train, y4_train) train_rsquared4 = ridge_reg4.score(x4_train, y4_train) test_rsquared4 = ridge_reg4.score(x4_test, y4_test) # Print results print 'Ridge Regression: R^2 score on training set', train_rsquared4 print 'Ridge Regression: R^2 score on test set', test_rsquared4<jupyter_output>Ridge Regression: R^2 score on training set 0.516172699128 Ridge Regression: R^2 score on test set -0.109274659067 <jupyter_text> ** Comment: ** Ride regression wiht a $\lambda$ value of 1 performs still very badly as the R2 on the test set is negative. 
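The negative value is not a numerical glitch: `score()` reports $R^2 = 1 - SS_{res}/SS_{tot}$ on the data it is given, and that quantity drops below zero as soon as the model predicts worse than simply using the mean of the observed responses. A toy illustration with made-up numbers:

```python
import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_wild = np.array([10.0, -10.0, 30.0, -30.0])   # predictions far worse than the mean

ss_res = np.sum((y_true - y_wild) ** 2)
ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
print(1.0 - ss_res / ss_tot)                    # a large negative R^2
```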
<jupyter_code># Trying with different values of lambda train_r_squared, test_r_squared, coeff_a, lambdas, y_test_predict = Ridge_Regression(x4_test, y4_test, x4_train, y4_train, -5,15) # Plotting fig, ax = plt.subplots(1, 1, figsize=(18, 8)) ax.semilogx(10.0**lambdas, (train_r_squared), c='b', label='Ridge: Train set', color="darkblue", alpha=0.6, linewidth=3) ax.semilogx(10.0**lambdas, (test_r_squared), c='r', label='Ridge: Test set', color="darkred", alpha=0.6, linewidth=3) ax.set_xlabel('Regularization parameter $\lambda$') ax.set_ylabel(r'$R^2$ score') ax.legend(loc = 'best') ax.grid() plt.xlim(10**(-5), 10**(11)) <jupyter_output><empty_output><jupyter_text> ** Comment: ** Itterating over different values of $\lambda$ shows that the R2 score can be improved. The best $R^2$ of 0.0731 is achieved for $\lambda = 1000$. For values of $\lambda$ smaller than 10, the R2 is negative and for values bigger than 1000, the R2 starts moving closer to zero. ### Lasso Regression<jupyter_code># Lasso regression train_r_squared, test_r_squared, coeff_a, lambdas, test_mse = Lasso_Regression(x4_test, y4_test, x4_train, y4_train, -5,15) # Plotting fig, ax = plt.subplots(1, 1, figsize=(18, 8)) ax.semilogx(10.0**lambdas, (train_r_squared), c='b', label='Lasso: Train set', color="darkblue", alpha=0.6, linewidth=3) ax.semilogx(10.0**lambdas, (test_r_squared), c='r', label='Lasso: Test set', color="darkred", alpha=0.6, linewidth=3) ax.set_xlabel('Regularization parameter $\lambda$') ax.set_ylabel(r'$R^2$ score') ax.legend(loc = 'best') ax.grid() plt.xlim(10**(-5), 10**(11)) <jupyter_output><empty_output><jupyter_text> ** Comment: ** The highest R2 value of the Lasso regression is reached at around 0.2. <jupyter_code>## Evaluate profit ridge_reg4 = Ridge_Reg(alpha=1000) ridge_reg4.fit(x4_train, y4_train) y_predict = ridge_reg4.predict(x4).flatten() # Select 200 people to mail donors_sorted = sorted(y_predict, reverse = True) donors = y4.reshape(len(y4)) # Iterate max_num_donors = 500 max_profit = np.zeros(max_num_donors) random_profit = np.zeros(max_num_donors) for n in xrange(max_num_donors): max_profit[n] = sum(donors_sorted[0:n]) - n*7 random_profit[n] = np.sum(np.random.choice(donors, n, replace=False)) - n*7 # Plotting fig, ax = plt.subplots(1, 1, figsize=(18, 8)) ax.plot(xrange(max_num_donors), max_profit, color="darkblue", alpha=0.6, linewidth=3) ax.plot(xrange(max_num_donors), random_profit, color="darkred", alpha=0.6, linewidth=3) ax.set_xlabel('Number of donors mailed') ax.set_ylabel('Profit') ax.grid() <jupyter_output><empty_output>
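One way to turn the profit curve above into a decision rule for Part (b): with a \$7 mailing cost, mail a donor only when the predicted donation exceeds the cost. A sketch, reusing `ridge_reg4`, `x4` and `y4` from the cells above; like the plot, it is evaluated in-sample, so the numbers are optimistic.

```python
import numpy as np

cost = 7.0
pred = ridge_reg4.predict(x4).flatten()
actual = y4.reshape(len(y4))

mail = pred > cost                                     # mail only when the model expects a net gain
profit_targeted = np.sum(actual[mail]) - cost * np.sum(mail)
profit_blanket = np.sum(actual) - cost * len(actual)   # blanket mailing everyone, for comparison
print(profit_targeted, profit_blanket)
```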
no_license
/hw4/Hagmann_Tim_CS109A_HW4.ipynb
greenore/ac209a-coursework
16
<jupyter_start><jupyter_text><jupyter_code>## module choleski # L = choleski(a) # Choleski decomposition: [L][L]transpose = [a] # x = choleskiSol(L,b) # Solution phase of Choleski’s decomposition method ## import numpy as np import math def choleski(a): n = len(a) for k in range(n): a[k,k] = math.sqrt(a[k,k] - np.dot(a[k,0:k],a[k,0:k])) for i in range(k+1,n): a[i,k] = (a[i,k] - np.dot(a[i,0:k],a[k,0:k]))/a[k,k] for k in range(1,n): a[0:k,k] = 0.0 return a def choleskiSol(L,b): n = len(b) # Solution of [L]{y} = {b} for k in range(n): b[k] = (b[k] - np.dot(L[k,0:k],b[0:k]))/L[k,k] # Solution of [L_transpose]{x} = {y} for k in range(n-1,-1,-1): b[k] = (b[k] - np.dot(L[k+1:n,k],b[k+1:n]))/L[k,k] return b a = np.array([[ 1.44, -0.36, 5.52, 0.0], \ [-0.36, 10.33, -7.78, 0.0], \ [ 5.52, -7.78, 28.40, 9.0], \ [ 0.0, 0.0, 9.0, 61.0]]) b = np.array([0.04, -2.15, 0.0, 0.88]) aOrig = a.copy() L = choleski(a) x = choleskiSol(L,b) print("x =",x) print('\nCheck: A*x =\n',np.dot(aOrig,x)) input("\nPress return to exit")<jupyter_output>x = [ 0.01661073 -0.02010266 -0.00030452 0.00023723] Check: A*x = [ 0.02947544 -0.21127116 0.24157649 0.01173005] Press return to exity
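A quick cross-check of the routine above against NumPy's built-in factorization on the same matrix; `numpy.linalg.cholesky` returns the lower-triangular factor directly, so the system can then be solved with two triangular solves.

```python
import numpy as np

a = np.array([[ 1.44, -0.36,  5.52,  0.0],
              [-0.36, 10.33, -7.78,  0.0],
              [ 5.52, -7.78, 28.40,  9.0],
              [ 0.0,   0.0,   9.0,  61.0]])
b = np.array([0.04, -2.15, 0.0, 0.88])

L = np.linalg.cholesky(a)        # lower-triangular L with a = L L^T
y = np.linalg.solve(L, b)        # forward substitution:  [L]{y} = {b}
x = np.linalg.solve(L.T, y)      # back substitution:     [L^T]{x} = {y}

print("L =\n", L)
print("x =", x)
print("Check: A*x =", np.dot(a, x))   # should reproduce b
```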
no_license
/Choleski.ipynb
NGUYEN-VAN-HCMUT/Code_Numerical_Method
1
<jupyter_start><jupyter_text># Trip data exploration## Class retrieving the data from the C++ server program<jupyter_code>import numpy import pandas class DataReader: def __init__( self, cppToPythonPipeName, pythonToCppPipeName ): self.__cppToPythonPipeName = cppToPythonPipeName self.__pythonToCppPipeName = pythonToCppPipeName return def exit( self ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "exit" ) fo.close() return def driverList( self ): result = [] fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "drivers" ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result.append( int(fi.readline()) ) fi.close() return result def tripList( self, driverId ): result = [] fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "trips " + str(driverId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result.append( int(fi.readline()) ) fi.close() return result def rawData( self, driverId, tripId ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "rawdata " + str(driverId) + " " + str(tripId) ) fo.close() result = numpy.array([]).reshape([0,2]) fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): data = fi.readline().strip().split() result = numpy.vstack((result, numpy.array([float(data[0]),float(data[1])]) )) fi.close() return result def segmentData( self, driverId, tripId ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "segments " + str(driverId) + " " + str(tripId) ) fo.close() sresult = [] fi = open( self.__cppToPythonPipeName, 'r' ) lines = fi.readlines() fi.close() nsegs = int(lines[0].strip()) i = 1 for s in range(nsegs): result = numpy.array([]).reshape([0,2]) n = int(lines[i].strip()) i += 1 for ipoint in range(n): data = lines[i].strip().split() i += 1 point = numpy.array([float(data[0]),float(data[1])]) result = numpy.vstack((result, point )) sresult.append( result ) return sresult def accelerationValues( self, driverId, tripId ): result = [] fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "acceleration " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result.append( float(fi.readline()) ) fi.close() return result def travelDuration( self, driverId, tripId ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "travelDuration " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) v = float( fi.readline() ) fi.close() return v def travelLength( self, driverId, tripId ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "travelLength " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) v = float( fi.readline() ) fi.close() return v def distanceOfEndPoint( self, driverId, tripId ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "distanceOfEndPoint " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) v = float( fi.readline() ) fi.close() return v def speedValues( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "speed " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def fftValues( self, driverId, tripId ): result = numpy.array([]) fo = open( 
self.__pythonToCppPipeName, 'w' ) fo.write( "fft " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.hstack( ( result, float(fi.readline() ) ) ) fi.close() return result def fftDirectionValues( self, driverId, tripId ): result = numpy.array([]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "fft_direction " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.hstack( ( result, float(fi.readline() ) ) ) fi.close() return result def accelerationValues( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "acceleration " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def directionValues( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "direction " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def speedAccelerationDirectionValues( self, driverId, tripId ): result = numpy.array([]).reshape([0,3]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "speedAccelerationDirection " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): line = fi.readline().strip().split() line = numpy.array([float(line[0]),float(line[1]),float(line[2])]) result = numpy.vstack( ( result, line ) ) fi.close() return result def speedQuantiles( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "speedQuantiles " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def accelerationQuantiles( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "accelerationQuantiles " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def directionQuantiles( self, driverId, tripId ): result = numpy.array([]).reshape([0,1]) fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "directionQuantiles " + str(driverId) + " " + str(tripId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) n = int( fi.readline() ) for i in range(n): result = numpy.vstack( ( result, float(fi.readline() ) ) ) fi.close() return result def allTripMetrics( self ): fo = open( self.__pythonToCppPipeName, 'w' ) fo.write( "allTripMetrics" ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) columnNames = fi.readline().strip().split() nVariables = len(columnNames) n = int( fi.readline() ) result = numpy.zeros((n,nVariables)) for i in range(n): line = fi.readline().strip().split(); for j in range(nVariables): result[i,j] = float(line[j]) fi.close() return result, columnNames def driverTripMetrics( self, driverId ): fo 
= open( self.__pythonToCppPipeName, 'w' ) fo.write( "driverTripMetrics " + str(driverId) ) fo.close() fi = open( self.__cppToPythonPipeName, 'r' ) columnNames = fi.readline().strip().split() nVariables = len(columnNames) n = int( fi.readline() ) result = numpy.zeros((n,nVariables)) for i in range(n): line = fi.readline().strip().split(); for j in range(nVariables): result[i,j] = float(line[j]) fi.close() return result, columnNames <jupyter_output><empty_output><jupyter_text>## A function to plot the raw data<jupyter_code># Gets the angle from y to x def angleOfVectors( x, y ): mx = numpy.sqrt(x[0]**2 + x[1]**2) if mx == 0: return 0 my = numpy.sqrt(y[0]**2 + y[1]**2) if my == 0: return 0 mxy = mx * my sint = ( x[0]*y[1] - x[1]*y[0] ) / mxy cost = ( x[0]*y[0] + x[1]*y[1] ) / mxy if sint > 1: sint = 1 if sint < -1: sint = -1 if cost >= 0: return numpy.arcsin( sint ) else: if sint > 0: return numpy.pi - numpy.arcsin( sint ) else: if cost > 1: cost=1 if cost < -1: cost=-1 return -( numpy.arccos( cost ) ) # Function for plotting the raw data of the segments def plotSegmentData( segments ): tripData = numpy.array([]).reshape([0,2]) tAngles = numpy.array([]) tSpeed = numpy.array([]) tAcceleration = numpy.array([]) for segment in segments: speedVectors = numpy.diff( segment, axis = 0 ) speedValues = 3.6 * numpy.apply_along_axis( lambda x: numpy.sqrt( x[0]**2+x[1]**2), 1, speedVectors ) if len(speedValues) > 1: accelerationValues = numpy.diff( speedValues ) / 3.6 angles = numpy.zeros( len(speedVectors) - 1 ) for i in range(len(angles)): v1 = speedVectors[i] v2 = speedVectors[i+1] angles[i] = angleOfVectors(v1,v2) * 180 / numpy.pi tAngles = numpy.hstack( (tAngles, angles) ) tAcceleration = numpy.hstack( (tAcceleration, accelerationValues) ) tSpeed = numpy.hstack( (tSpeed, speedValues) ) tripData = numpy.vstack( (tripData, segment) ) # Draw the raw data plt.figure( figsize = (10,10), facecolor='lightblue') ax = plt.subplot(321) ax.plot( tripData[0:,0], tripData[0:,1], 'r.', alpha = 0.1 ) ax.grid(True) ax = plt.subplot(322) ax.plot( numpy.arange(0,len(tripData)), tripData[0:,1], 'b.', alpha = 0.1 ) ax.grid(True) ax = plt.subplot(323) ax.plot( tripData[0:,0], numpy.arange(0,len(tripData)), 'b.', alpha = 0.1 ) ax.grid(True) ax = plt.subplot(324) ax.plot( numpy.arange(0,len(tAngles)), tAngles, 'r-' ) ax.plot( numpy.arange(0,len(tAngles)), tAngles, 'r.' 
) ax.grid(True) ax = plt.subplot(325) ax.plot( numpy.arange(0,len(tSpeed)), tSpeed, 'g-' ) ax.plot( numpy.arange(0,len(tSpeed)), tSpeed, 'g.', alpha=0.15 ) ax.grid(True) ax = plt.subplot(326) ax.plot( numpy.arange(0,len(tAcceleration)), tAcceleration, 'b-' ) ax.plot( numpy.arange(0,len(tAcceleration)), tAcceleration, 'b.', alpha=0.15 ) ax.grid(True) return <jupyter_output><empty_output><jupyter_text>## Function selecting the middle x% of the data for a given column<jupyter_code>def selectVariable(data, column, percentage = 99 ): percentile = (100 - percentage) / 2 cut_low = numpy.percentile(data[0:,column], percentile ) cut_high = numpy.percentile(data[0:,column], 100-percentile) selection = data[(data[0:,column] > cut_low) & (data[0:,column]<cut_high)][0:,column] return selection<jupyter_output><empty_output><jupyter_text>## Function for rebinning a histogram<jupyter_code>def reBin( originalBins, numberOfBins ): lowEdge = originalBins[0] highEdge = originalBins[-1] binSize = ( highEdge - lowEdge ) / numberOfBins result = [] for i in range(numberOfBins): result.append( lowEdge + i * binSize) result.append(highEdge) return result<jupyter_output><empty_output><jupyter_text>## Function for normalising an array after removing the outliars<jupyter_code>def normaliseData( x ): orderedX = numpy.sort(x[ ~ numpy.isnan( x ) ]) orderedX = orderedX[0.0025*len(orderedX) : 0.9975*len(orderedX)] m = numpy.mean( orderedX ) s = numpy.std( orderedX ) minValue = orderedX[0] maxValue = orderedX[-1] result = numpy.zeros(len(x)) for i in range(len(result)): if ( numpy.isnan(x[i]) or ( x[i] < minValue ) or ( x[i] > maxValue ) ): result[i] = numpy.nan else: result[i] = ( x[i] - m ) / s return result<jupyter_output><empty_output><jupyter_text>## Function for performing a Primary Component Analysis (on normalised data)<jupyter_code>def findPCA( x ): (nSamples, dimensions) = x.shape # Calculate mean vector meanVector = numpy.apply_along_axis( numpy.mean, 0, x ) # Calculate covariance matrix from scatted matrix scatter_matrix = numpy.zeros((dimensions,dimensions)) for i in range(nSamples): d = ( x[i,:] - meanVector ).reshape( dimensions, 1 ) scatter_matrix += d.dot( d.T ) scatter_matrix /= nSamples - 1 # Get the eigenvalues eig_val, eig_vec = numpy.linalg.eig(scatter_matrix) pairs = [( numpy.abs(eig_val[i]), eig_vec[i] ) for i in range(dimensions) ] pairs.sort() pairs.reverse() v = numpy.zeros(dimensions) w = [] for i in range(dimensions): v[i] = pairs[i][0] w.append(pairs[i][1]) explained_variance_ratio = v / v.sum() return (v, w, explained_variance_ratio)<jupyter_output><empty_output><jupyter_text>## Function to plot the speed, acceleration and direction values<jupyter_code>def plotSpeedAccelerationDirection( dataReader, driverId, tripId ): values = dataReader.speedAccelerationDirectionValues(driverId, tripId) plt.figure(figsize=(10,10), facecolor='lightblue') ax = plt.subplot(331) ax.plot( numpy.arange(0,len(values) ), values[0:,0], 'g-' ) ax.set_ylabel('Speed $m/s$') ax.grid(True) ax = plt.subplot(332) ax.plot( numpy.arange(0,len(values) ), values[0:,1], 'b-' ) ax.set_ylabel('Acceleration $m/s^2$') ax.grid(True) ax = plt.subplot(333) ax.plot( numpy.arange(0,len(values) ), values[0:,2], 'r-' ) ax.set_ylabel('Direction $rad$') ax.grid(True) ax = plt.subplot(334) ax.plot( values[0:,0], values[0:,1], 'g.', alpha=0.3 ) ax.set_xlabel('Speed $m/s$') ax.set_ylabel('Acceleration $m/s^2$') ax.grid(True) slope, intercept, r_value, p_value, std_err =scipy.stats.linregress( values[0:,0], values[0:,1]) r2 = r_value**2 xticks = 
ax.xaxis.get_majorticklocs() yticks = ax.yaxis.get_majorticklocs() xorig = xticks[0] + 0.2 * (xticks[-1] - xticks[0]) yorig = yticks[0] + 0.8 * (yticks[-1] - yticks[0]) ax.text(xorig, yorig,'$R^2$ : ' + str(numpy.around(r2,3)), ha='left') ax.plot( xticks, xticks * slope + intercept, 'g-') ax = plt.subplot(335) ax.plot( values[0:,0], values[0:,2], 'b.', alpha=0.3 ) ax.set_xlabel('Speed $m/s$') ax.set_ylabel('Direction $rad$') ax.grid(True) slope, intercept, r_value, p_value, std_err =scipy.stats.linregress( values[0:,0], values[0:,2]) r2 = r_value**2 xticks = ax.xaxis.get_majorticklocs() yticks = ax.yaxis.get_majorticklocs() xorig = xticks[0] + 0.2 * (xticks[-1] - xticks[0]) yorig = yticks[0] + 0.8 * (yticks[-1] - yticks[0]) ax.text(xorig, yorig,'$R^2$ : ' + str(numpy.around(r2,3)), ha='left') ax.plot( xticks, xticks * slope + intercept, 'b-') ax = plt.subplot(336) ax.plot( values[0:,1], values[0:,2], 'r.', alpha=0.3 ) ax.set_xlabel('Acceleration $m/s^2$') ax.set_ylabel('Direction $rad$') ax.grid(True) slope, intercept, r_value, p_value, std_err =scipy.stats.linregress( values[0:,1], values[0:,2]) r2 = r_value**2 xticks = ax.xaxis.get_majorticklocs() yticks = ax.yaxis.get_majorticklocs() xorig = xticks[0] + 0.2 * (xticks[-1] - xticks[0]) yorig = yticks[0] + 0.8 * (yticks[-1] - yticks[0]) ax.text(xorig, yorig,'$R^2$ : ' + str(numpy.around(r2,3)), ha='left') ax.plot( xticks, xticks * slope + intercept, 'r-') ax = plt.subplot(337) ax.hist( values[0:,0], 30, normed=True, facecolor='green' ) ax.set_xlabel('Speed $m/s$') ax.grid(True) ax = plt.subplot(338) ax.hist( values[0:,1], 30, normed=True, facecolor='blue' ) ax.set_xlabel('Acceleration $m/s^2$') ax.grid(True) ax = plt.subplot(339) ax.hist( values[0:,2], 30, normed=True, facecolor='red' ) ax.set_xlabel('Direction $rad$') ax.grid(True) return<jupyter_output><empty_output><jupyter_text>## Main scriptSet up matplotlib and other packages<jupyter_code>%matplotlib inline import matplotlib.pyplot as plt import scipy.stats import sklearn.decomposition<jupyter_output><empty_output><jupyter_text>Create the connection to the server<jupyter_code>dataReader = DataReader('cpptopythonpipe','pythontocpppipe') driverIds = dataReader.driverList() (metricData, variableNames) = dataReader.allTripMetrics() driverId = 1 if driverId not in driverIds: raise BaseException("Invalid driver id") (driverMetrics, variableNames) = dataReader.driverTripMetrics(driverId) # Plot the trip metrics numberOfDriverBins = 20 numberOfBackgroundBins = 150 numberOfMetrics = len(variableNames) numberOfImageColumns = 4 numberOfImageRows = int( numpy.ceil(numberOfMetrics / numberOfImageColumns )) plt.figure( figsize = (numberOfImageColumns * 4, numberOfImageRows * 4) ) for i in range(numberOfMetrics): ax = plt.subplot( numberOfImageRows, numberOfImageColumns, i + 1 ) generalData = numpy.sort(metricData[ ~ numpy.isnan( metricData[0:,i] ) ][0:,i]) generalData = generalData[0.0025*len(generalData) : 0.9975*len(generalData)] (v,b,o) = plt.hist(generalData, bins=numberOfBackgroundBins, normed=True, alpha=0.3) driverMetricData = driverMetrics[ ~ numpy.isnan( driverMetrics[0:,i] ) ][0:,i] (v,b,o) = plt.hist(driverMetricData, bins=b[0] + numpy.arange(numberOfDriverBins+1)*(b[-1]-b[0])/numberOfDriverBins, normed=True, alpha=0.3) t = ax.set_title( variableNames[i] ) # Perform PCA analysis after cleaning and normalising normalisedData = numpy.apply_along_axis(normaliseData,0,metricData) normalisedData=normalisedData[~numpy.any( numpy.isnan(normalisedData), axis=1 )][0:,2:] 
numberOfPrincipalComponents = 10 pca = sklearn.decomposition.PCA(n_components = numberOfPrincipalComponents ) res = pca.fit(normalisedData) # Transform the data transformedData = pca.transform(normalisedData) normalisedDataD = numpy.apply_along_axis(normaliseData,0,driverMetrics) normalisedDataD=normalisedDataD[~numpy.any( numpy.isnan(normalisedDataD), axis=1 )][0:,2:] transformedDataD = pca.transform(normalisedDataD) plt.figure( figsize = (numberOfImageColumns * 4, numberOfImageRows * 4), facecolor='lightblue') for i in range(numberOfPrincipalComponents): ax = plt.subplot( numberOfImageRows, numberOfImageColumns, i + 1 ) h = plt.hist(transformedData[0:,i], numberOfBackgroundBins, normed=True) h = plt.hist(transformedDataD[0:,i], numberOfDriverBins, normed=True) len(transformedData) driverId=2811 tripId=115 rawData=dataReader.rawData(driverId,tripId) segmentData = dataReader.segmentData(driverId,tripId) plotSegmentData( [rawData,]) plotSegmentData(segmentData) # Speed, Acceleration, direction histograms driverId = 1 tripId = 2 plotSpeedAccelerationDirection( dataReader, driverId, tripId ) dataReader.exit()<jupyter_output><empty_output>
no_license
/cpp/TripDataExplore.ipynb
ip21/Kaggle-AXA-telematics
9
<jupyter_start><jupyter_text>- prepare a list with the total of days per month - make another list to store the number of days related to each month - read the file and store the elements - define a function to go over the numbers of days of each month and display the sum of steps - end <jupyter_code>months=["January","February","March","April", "May","June","July", "August","September","October","November","December"] days_of_months=[31,28,31,30,31,30,31,31,30,31,30,31] def sum_of_steps_per_month(my_file_object,month,steps,start): sum_of_steps = 0 days=days_of_months[months.index(month)] for day in range(start, days + start): sum_of_steps = sum_of_steps + steps[day] avg_of_steps = sum_of_steps / days line=month+"\t\t\t"+f'{avg_of_steps:.1f}' my_file_object.write(line + '\n') return start + days def main(): steps_file_object = open("steps.txt", 'r') steps = steps_file_object.readlines() # ok, let's make them into actual numbers for index in range(len(steps)): # these endlines have to go steps[index] = steps[index].rstrip('\n') # now we can cast them to integers steps[index] = int(steps[index]) steps_file_object.close() start = 0 my_file_object = open("step_averages.txt", "w") my_file_object.write("Month Average steps taken\n") my_file_object.write("------------------------------------------\n") for month in months: start = sum_of_steps_per_month(my_file_object,month,steps,start) # always close your files my_file_object.close() if __name__ == "__main__": main() <jupyter_output><empty_output>
no_license
/notebooks/.ipynb_checkpoints/fernando_tapia_in_class_9-checkpoint.ipynb
tapiagoras/dat-119-spring-2019
1
<jupyter_start><jupyter_text>### Reading Test Scores The Programme for International Student Assessment (PISA) is a test given every three years to 15-year-old students from around the world to evaluate their performance in mathematics, reading, and science. This test provides a quantitative way to compare the performance of students from different parts of the world. In this homework assignment, we will predict the reading scores of students from the United States of America on the 2009 PISA exam. The datasets pisa2009train.csv and pisa2009test.csv contain information about the demographics and schools for American students taking the exam, derived from 2009 PISA Public-Use Data Files distributed by the United States National Center for Education Statistics (NCES). While the datasets are not supposed to contain identifying information about students taking the test, by using the data you are bound by the NCES data use agreement, which prohibits any attempt to determine the identity of any student in the datasets. Each row in the datasets pisa2009train.csv and pisa2009test.csv represents one student taking the exam. The datasets have the following variables: grade: The grade in school of the student (most 15-year-olds in America are in 10th grade) male: Whether the student is male (1/0) raceeth: The race/ethnicity composite of the student preschool: Whether the student attended preschool (1/0) expectBachelors: Whether the student expects to obtain a bachelor's degree (1/0) motherHS: Whether the student's mother completed high school (1/0) motherBachelors: Whether the student's mother obtained a bachelor's degree (1/0) motherWork: Whether the student's mother has part-time or full-time work (1/0) fatherHS: Whether the student's father completed high school (1/0) fatherBachelors: Whether the student's father obtained a bachelor's degree (1/0) fatherWork: Whether the student's father has part-time or full-time work (1/0) selfBornUS: Whether the student was born in the United States of America (1/0) motherBornUS: Whether the student's mother was born in the United States of America (1/0) fatherBornUS: Whether the student's father was born in the United States of America (1/0) englishAtHome: Whether the student speaks English at home (1/0) computerForSchoolwork: Whether the student has access to a computer for schoolwork (1/0) read30MinsADay: Whether the student reads for pleasure for 30 minutes/day (1/0) minutesPerWeekEnglish: The number of minutes per week the student spend in English class studentsInEnglish: The number of students in this student's English class at school schoolHasLibrary: Whether this student's school has a library (1/0) publicSchool: Whether this student attends a public school (1/0) urban: Whether this student's school is in an urban area (1/0) schoolSize: The number of students in this student's school readingScore: The student's reading score, on a 1000-point scale#### Problem 1.1 - Dataset size Load the training and testing sets using the read.csv() function, and save them as variables with the names pisaTrain and pisaTest. How many students are there in the training set?<jupyter_code>pisaTrain <- read.csv('pisa2009train.csv') pisaTest <- read.csv('pisa2009test.csv') nrow(pisaTrain)<jupyter_output><empty_output><jupyter_text>#### Problem 1.2 - Summarizing the dataset Using tapply() on pisaTrain, what is the average reading test score of males?<jupyter_code>tapply( pisaTrain$readingScore, pisaTrain$male, mean)<jupyter_output><empty_output><jupyter_text>Of females? 
#### Problem 1.3 - Locating missing values Which variables are missing data in at least one observation in the training set? Select all that apply.<jupyter_code>apply( X =pisaTrain, MARGIN = 2, FUN = function(col) sum(is.na(col)))<jupyter_output><empty_output><jupyter_text>#### Problem 1.4 - Removing missing values Linear regression discards observations with missing data, so we will remove all such observations from the training and testing sets. Later in the course, we will learn about imputation, which deals with missing data by filling in missing values with plausible information. Type the following commands into your R console to remove observations with any missing value from pisaTrain and pisaTest: pisaTrain = na.omit(pisaTrain) pisaTest = na.omit(pisaTest) How many observations are now in the training set?<jupyter_code>pisaTrain = na.omit(pisaTrain) pisaTest = na.omit(pisaTest) nrow(pisaTrain)<jupyter_output><empty_output><jupyter_text>How many observations are now in the testing set? <jupyter_code>nrow(pisaTest)<jupyter_output><empty_output><jupyter_text>#### Problem 2.1 - Factor variables Factor variables are variables that take on a discrete set of values, like the "Region" variable in the WHO dataset from the second lecture of Unit 1. This is an unordered factor because there isn't any natural ordering between the levels. An ordered factor has a natural ordering between the levels (an example would be the classifications "large," "medium," and "small"). Which of the following variables is an unordered factor with at least 3 levels? (Select all that apply.)<jupyter_code>summary(pisaTrain)<jupyter_output><empty_output><jupyter_text>raceethWhich of the following variables is an ordered factor with at least 3 levels? (Select all that apply.) grade#### Problem 2.2 - Unordered factors in regression models To include unordered factors in a linear regression model, we define one level as the "reference level" and add a binary variable for each of the remaining levels. In this way, a factor with n levels is replaced by n-1 binary variables. The reference level is typically selected to be the most frequently occurring level in the dataset. As an example, consider the unordered factor variable "color", with levels "red", "green", and "blue". If "green" were the reference level, then we would add binary variables "colorred" and "colorblue" to a linear regression problem. All red examples would have colorred=1 and colorblue=0. All blue examples would have colorred=0 and colorblue=1. All green examples would have colorred=0 and colorblue=0. Now, consider the variable "raceeth" in our problem, which has levels "American Indian/Alaska Native", "Asian", "Black", "Hispanic", "More than one race", "Native Hawaiian/Other Pacific Islander", and "White". Because it is the most common in our population, we will select White as the reference level. Which binary variables will be included in the regression model? (Select all that apply.)#### Problem 2.3 - Example unordered factors Consider again adding our unordered factor race to the regression model with reference level "White". For a student who is Asian, which binary variables would be set to 0? All remaining variables will be set to 1. (Select all that apply.)For a student who is white, which binary variables would be set to 0? All remaining variables will be set to 1. (Select all that apply.) 
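As a quick sanity check of the n-1 dummy-variable scheme described above, here is a small, hedged illustration in Python/pandas (the notebook itself is in R, where setting the reference level with relevel() and calling lm() performs this expansion automatically). The toy `color` column is made up and is not part of the PISA data.

```python
import pandas as pd

# Toy unordered factor with three levels, mirroring the "color" example above
df = pd.DataFrame({"color": ["red", "green", "blue", "green", "red"]})

# Put the reference level ("green") first, then drop its dummy column,
# so a factor with 3 levels becomes 3 - 1 = 2 binary variables
df["color"] = pd.Categorical(df["color"], categories=["green", "red", "blue"])
dummies = pd.get_dummies(df["color"], prefix="color", drop_first=True)
print(dummies)
# Every "green" row has color_red = 0 and color_blue = 0, exactly as described.
```

The same logic applies to raceeth: with "White" as the reference level, six binary variables (one per remaining level) enter the regression.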
#### Problem 3.1 - Building a model Because the race variable takes on text values, it was loaded as a factor variable when we read in the dataset with read.csv() -- you can see this when you run str(pisaTrain) or str(pisaTest). However, by default R selects the first level alphabetically ("American Indian/Alaska Native") as the reference level of our factor instead of the most common level ("White"). Set the reference level of the factor by typing the following two lines in your R console: pisaTrain$raceeth = relevel(pisaTrain$raceeth, "White") pisaTest$raceeth = relevel(pisaTest$raceeth, "White") Now, build a linear regression model (call it lmScore) using the training set to predict readingScore using all the remaining variables. It would be time-consuming to type all the variables, but R provides the shorthand notation "readingScore ~ ." to mean "predict readingScore using all the other variables in the data frame." The period is used to replace listing out all of the independent variables. As an example, if your dependent variable is called "Y", your independent variables are called "X1", "X2", and "X3", and your training data set is called "Train", instead of the regular notation: LinReg = lm(Y ~ X1 + X2 + X3, data = Train) You would use the following command to build your model: LinReg = lm(Y ~ ., data = Train) What is the Multiple R-squared value of lmScore on the training set?<jupyter_code>str(pisaTrain) pisaTrain$raceeth = relevel(pisaTrain$raceeth, "White") pisaTest$raceeth = relevel(pisaTest$raceeth, "White") str(pisaTrain)<jupyter_output>'data.frame': 2414 obs. of 24 variables: $ grade : int 11 10 10 10 10 10 10 10 11 9 ... $ male : int 1 0 1 0 1 0 0 0 1 1 ... $ raceeth : Factor w/ 7 levels "White","American Indian/Alaska Native",..: 1 4 5 1 6 5 1 5 1 1 ... $ preschool : int 0 1 1 1 1 1 1 1 1 1 ... $ expectBachelors : int 0 1 0 1 1 1 1 0 1 1 ... $ motherHS : int 1 0 1 1 1 1 1 0 1 1 ... $ motherBachelors : int 1 0 0 0 1 0 0 0 0 1 ... $ motherWork : int 1 1 1 0 1 1 1 0 0 1 ... $ fatherHS : int 1 1 1 1 0 1 1 0 1 1 ... $ fatherBachelors : int 0 0 0 0 0 0 1 0 1 1 ... $ fatherWork : int 1 1 0 1 1 0 1 1 1 1 ... $ selfBornUS : int 1 1 1 1 1 0 1 0 1 1 ... $ motherBornUS : int 1 1 1 1 1 0 1 0 1 1 ... $ fatherBornUS : int 1 1 0 1 1 0 1 0 1 1 ... $ englishAtHome : int 1 1 1 1 1 0 1 0 1 1 ... $ computerForSchoolwork: int 1 1 1 1 1 0 1 1 1 1 ... $ read30Mi[...]<jupyter_text>Note that this R-squared is lower than the ones for the models we saw in the lectures and recitation. This does not necessarily imply that the model is of poor quality. More often than not, it simply means that the prediction problem at hand (predicting a student's test score based on demographic and school-related variables) is more difficult than other prediction problems (like predicting a team's number of wins from their runs scored and allowed, or predicting the quality of wine from weather conditions). <jupyter_code>lmScore <- lm(readingScore ~ . , data=pisaTrain ) summary(lmScore)$r.squared<jupyter_output><empty_output><jupyter_text>#### Problem 3.2 - Computing the root-mean squared error of the model What is the training-set root-mean squared error (RMSE) of lmScore?<jupyter_code>SSE<-sum(lmScore$residuals^2) RMSETrain<-sqrt(SSE/nrow(pisaTrain)) RMSETrain<jupyter_output><empty_output><jupyter_text>#### Problem 3.3 - Comparing predictions for similar students Consider two students A and B. They have all variable values the same, except that student A is in grade 11 and student B is in grade 9. 
What is the predicted reading score of student A minus the predicted reading score of student B?<jupyter_code>predictPisa <- predict(lmScore, pisaTest)<jupyter_output><empty_output><jupyter_text>#### Problem 3.4 - Interpreting model coefficients What is the meaning of the coefficient associated with variable raceethAsian?#### Problem 3.5 - Identifying variables lacking statistical significance Based on the significance codes, which variables are candidates for removal from the model? Select all that apply. (We'll assume that the factor variable raceeth should only be removed if none of its levels are significant.)<jupyter_code>summary(lmScore)<jupyter_output><empty_output><jupyter_text>#### Problem 4.1 - Predicting on unseen data Using the "predict" function and supplying the "newdata" argument, use the lmScore model to predict the reading scores of students in pisaTest. Call this vector of predictions "predTest". Do not change the variables in the model (for example, do not remove variables that we found were not significant in the previous part of this problem). Use the summary function to describe the test set predictions. What is the range between the maximum and minimum predicted reading score on the test set? sin responder <jupyter_code>predTest <- predict(lmScore, pisaTest) summary(predTest) max(predTest)-min(predTest)<jupyter_output><empty_output><jupyter_text>#### Problem 4.2 - Test set SSE and RMSE What is the sum of squared errors (SSE) of lmScore on the testing set?<jupyter_code>SSE <- sum((predTest-pisaTest$readingScore)^2) SSE<jupyter_output><empty_output><jupyter_text>What is the root-mean squared error (RMSE) of lmScore on the testing set? <jupyter_code>RMSE <- sqrt(SSE/nrow(pisaTest)) RMSE<jupyter_output><empty_output><jupyter_text>#### Problem 4.3 - Baseline prediction and test-set SSE What is the predicted test score used in the baseline model? Remember to compute this value using the training set and not the test set.<jupyter_code>baseline <- mean(pisaTrain$readingScore) baseline<jupyter_output><empty_output><jupyter_text>What is the sum of squared errors of the baseline model on the testing set? HINT: We call the sum of squared errors for the baseline model the total sum of squares (SST).<jupyter_code>SST <- sum((pisaTest$readingScore - baseline)^2) SST<jupyter_output><empty_output><jupyter_text>#### Problem 4.4 - Test-set R-squared What is the test-set R-squared value of lmScore?<jupyter_code>1-(SSE/SST)<jupyter_output><empty_output>
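The out-of-sample bookkeeping above (SSE, SST, RMSE, and test-set R-squared) is just arithmetic on two vectors, so it carries over directly to other languages. The following is a hedged sketch in Python using small made-up arrays rather than the actual PISA predictions.

```python
import numpy as np

# Hypothetical test-set predictions and actual reading scores
pred = np.array([480.0, 510.0, 495.0, 530.0])
actual = np.array([470.0, 520.0, 500.0, 525.0])
train_mean = 500.0  # baseline: mean readingScore of the *training* set

sse = np.sum((pred - actual) ** 2)        # model's sum of squared errors
sst = np.sum((actual - train_mean) ** 2)  # baseline (total) sum of squares
rmse = np.sqrt(sse / len(actual))         # root-mean squared error
r2 = 1 - sse / sst                        # test-set R-squared

print(f"SSE={sse:.1f}  SST={sst:.1f}  RMSE={rmse:.2f}  test R^2={r2:.3f}")
```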
no_license
/Unit 2 - Linear Regression/Reading Test Scores.ipynb
JEstebanMejiaV/The.Analytics.Edge
17
<jupyter_start><jupyter_text>## First Innings Score Prediction<jupyter_code># Importing essential libraries import pandas as pd import pickle # Loading the dataset df = pd.read_csv('C:/Users/niyaz/Downloads/IPL-First-Innings-Score-Prediction-Deployment-master/ipl.csv') df.head() # --- Data Cleaning --- # Removing unwanted columns columns_to_remove = ['mid', 'venue', 'batsman', 'bowler', 'striker', 'non-striker'] df.drop(labels=columns_to_remove, axis=1, inplace=True) df.head() df['bat_team'].unique() # Keeping only consistent teams consistent_teams = ['Kolkata Knight Riders', 'Chennai Super Kings', 'Rajasthan Royals', 'Mumbai Indians', 'Kings XI Punjab', 'Royal Challengers Bangalore', 'Delhi Daredevils', 'Sunrisers Hyderabad'] df = df[(df['bat_team'].isin(consistent_teams)) & (df['bowl_team'].isin(consistent_teams))] # Removing the first 5 overs data in every match df = df[df['overs']>=5.0] df.head() print(df['bat_team'].unique()) print(df['bowl_team'].unique()) df.info() # Converting the column 'date' from string into datetime object from datetime import datetime df['date'] = df['date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d')) # --- Data Preprocessing --- # Converting categorical features using OneHotEncoding method encoded_df = pd.get_dummies(data=df, columns=['bat_team', 'bowl_team']) encoded_df.head() encoded_df.columns # Rearranging the columns encoded_df = encoded_df[['date', 'bat_team_Chennai Super Kings', 'bat_team_Delhi Daredevils', 'bat_team_Kings XI Punjab', 'bat_team_Kolkata Knight Riders', 'bat_team_Mumbai Indians', 'bat_team_Rajasthan Royals', 'bat_team_Royal Challengers Bangalore', 'bat_team_Sunrisers Hyderabad', 'bowl_team_Chennai Super Kings', 'bowl_team_Delhi Daredevils', 'bowl_team_Kings XI Punjab', 'bowl_team_Kolkata Knight Riders', 'bowl_team_Mumbai Indians', 'bowl_team_Rajasthan Royals', 'bowl_team_Royal Challengers Bangalore', 'bowl_team_Sunrisers Hyderabad', 'overs', 'runs', 'wickets', 'runs_last_5', 'wickets_last_5', 'total']] # Splitting the data into train and test set X_train = encoded_df.drop(labels='total', axis=1)[encoded_df['date'].dt.year <= 2016] X_test = encoded_df.drop(labels='total', axis=1)[encoded_df['date'].dt.year >= 2017] y_train = encoded_df[encoded_df['date'].dt.year <= 2016]['total'].values y_test = encoded_df[encoded_df['date'].dt.year >= 2017]['total'].values # Removing the 'date' column X_train.drop(labels='date', axis=True, inplace=True) X_test.drop(labels='date', axis=True, inplace=True) encoded_df.head() # --- Model Building --- # Linear Regression Model from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train,y_train) # Creating a pickle file for the classifier filename = 'first-innings-score-lr-model.pkl' pickle.dump(regressor, open(filename, 'wb'))<jupyter_output><empty_output><jupyter_text>## Ridge Regression<jupyter_code>## Ridge Regression from sklearn.linear_model import Ridge from sklearn.model_selection import GridSearchCV ridge=Ridge() parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40]} # giving many values of alpha for fine tune of hyper parameters ridge_regressor=GridSearchCV(ridge,parameters,scoring='neg_mean_squared_error',cv=5) ridge_regressor.fit(X_train,y_train) print(ridge_regressor.best_params_) print(ridge_regressor.best_score_) prediction=ridge_regressor.predict(X_test) prediction import seaborn as sns sns.distplot(y_test-prediction) from sklearn import metrics import numpy as np print('MAE:', metrics.mean_absolute_error(y_test, prediction)) 
print('MSE:', metrics.mean_squared_error(y_test, prediction)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))<jupyter_output>MAE: 12.117294527005022 MSE: 251.0317296411275 RMSE: 15.843980864704662 <jupyter_text>## Lasso Regression<jupyter_code>from sklearn.linear_model import Lasso from sklearn.model_selection import GridSearchCV lasso=Lasso() parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40]} lasso_regressor=GridSearchCV(lasso,parameters,scoring='neg_mean_squared_error',cv=5) lasso_regressor.fit(X_train,y_train) print(lasso_regressor.best_params_) print(lasso_regressor.best_score_) prediction=lasso_regressor.predict(X_test) prediction import seaborn as sns sns.distplot(y_test-prediction) from sklearn import metrics import numpy as np print('MAE:', metrics.mean_absolute_error(y_test, prediction)) print('MSE:', metrics.mean_squared_error(y_test, prediction)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))<jupyter_output>MAE: 12.214053814850246 MSE: 262.3797366400714 RMSE: 16.19813991296752
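Since the whole point of pickling `first-innings-score-lr-model.pkl` is deployment, a natural follow-up is loading the model and scoring a single match state. The sketch below is illustrative only: the feature order is inferred from the rearranged `encoded_df` columns above (8 batting-team dummies, 8 bowling-team dummies, then overs, runs, wickets, runs_last_5, wickets_last_5) and should be verified against the actual training frame before use.

```python
import pickle
import numpy as np

# Load the linear regression model saved in the notebook above
with open('first-innings-score-lr-model.pkl', 'rb') as f:
    model = pickle.load(f)

# One-hot team flags follow the alphabetical column order used during training
bat_team = [1, 0, 0, 0, 0, 0, 0, 0]   # e.g. Chennai Super Kings batting
bowl_team = [0, 0, 0, 0, 1, 0, 0, 0]  # e.g. Mumbai Indians bowling
match_state = [10.2, 78, 2, 41, 1]    # overs, runs, wickets, runs_last_5, wickets_last_5

features = np.array([bat_team + bowl_team + match_state])
print("Predicted first-innings total:", int(round(model.predict(features)[0])))
```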
no_license
/First Innings Score Prediction - IPL - Niyaz.ipynb
Niyazkhan66/First-Innings-Score-Predictions
3
<jupyter_start><jupyter_text># Evolution functions $$ x_{t+1} = r x_{t}(1-x_{t}) $$<jupyter_code>import numpy as np import matplotlib.pyplot as plt def evolute(x_t, r): return r * x_t * (1-x_t) # draw an evolution curve of the function def draw_curve(fcn, r, n=100): x_0 = np.random.rand() curve = [x_0] for i in range(int(n)): x_0 = fcn(x_0, r) curve.append(x_0) plt.plot(curve) plt.title("curve of the evolution function") draw_curve(evolute, 3, 1e4) # draw the convergence points for different values of r def draw_convergence(fcn, n_iter=100, n_r=100): rs = np.empty(int(n_r)) curve = np.empty(int(n_r)) max_r = np.exp(4) min_r = np.exp(1) for r_int in range(int(n_r)): # use a log scale for r, as we want more detail when r is larger r = np.log(min_r + (max_r-min_r)*r_int/int(n_r)) x_0 = np.random.rand() rs[r_int] = r curve[r_int] = x_0 for i in range(int(n_iter)): curve = fcn(curve, rs) plt.figure(figsize=(20,10)) plt.scatter(rs, curve, s=1) plt.title("curve of the evolution function") draw_convergence(evolute, 1e4, 1e5) def evolute_fcn(x_0): return x_0 * (1-x_0) def evolute_fcn(x_0): return x_0 * np.sin(x_0) # draw an evolution curve of the function def draw_curve(fcn, n=100): x_0 = np.random.rand() curve = [x_0] for i in range(int(n)): x_0 = fcn(x_0) curve.append(x_0) plt.plot(curve) plt.title("curve of the evolution function") draw_curve(evolute_fcn, 1e4) def get_mandelbrot(fcn, n_size = 1000, n_iter = 1000): v_min = -5 v_max = 5 max_value = 100 result = np.empty([n_size, n_size]).astype(complex) for real_int in range(n_size): for img_int in range(n_size): real = v_min + (v_max-v_min)*real_int/n_size img = v_min + (v_max-v_min)*img_int/n_size result[real_int, img_int] = complex(real, img) for i in range(n_iter) : result = fcn(result) max_value = result[np.logical_not(np.isnan(result))].max() result[result>max_value] = max_value result[np.isnan(result)] = max_value return np.power(result.real, 2) + np.power(result.imag, 2) result = get_mandelbrot(evolute_fcn, 1000, 100) plt.figure(figsize=(10,10)) plt.imshow(result)<jupyter_output><empty_output>
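The `draw_convergence` helper above keeps only the final iterate for each r, so period-doubling shows up as scatter rather than as distinct branches. A conventional bifurcation diagram records many late iterates per r; the sketch below is one self-contained way to do that and does not reuse the notebook's helpers.

```python
import numpy as np
import matplotlib.pyplot as plt

def bifurcation(r_min=2.5, r_max=4.0, n_r=2000, n_iter=1000, n_keep=100):
    """Iterate x <- r*x*(1-x) for a grid of r values and keep the last iterates."""
    r = np.linspace(r_min, r_max, n_r)
    x = np.full(n_r, 0.5)                 # same starting point for every r
    kept_r, kept_x = [], []
    for i in range(n_iter):
        x = r * x * (1 - x)               # logistic-map update
        if i >= n_iter - n_keep:          # discard the transient, keep the tail
            kept_r.append(r)
            kept_x.append(x)
    return np.concatenate(kept_r), np.concatenate(kept_x)

rs, xs = bifurcation()
plt.figure(figsize=(12, 6))
plt.scatter(rs, xs, s=0.05, color='k')
plt.xlabel('r')
plt.ylabel('long-run x')
plt.title('Bifurcation diagram of the logistic map')
plt.show()
```

Around r of roughly 3 the single branch splits in two, and beyond roughly 3.57 the map becomes chaotic, which is the structure the convergence plot above is hinting at.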
no_license
/ChaosTest.ipynb
gggliuye/for_fun
1
<jupyter_start><jupyter_text>About the dataset This dataset chronicles every MLS game from 1996 to 2016. It is part of a much larger set of historical soccer data pulled from https://github.com/jalapic/engsoccerdata/. In major league soccer the atmosphere of a stadium can change wildly depending on who is the home team. The average attendance can range from in the low thousands to a MLS record attendance of 72,243. In more recent times though the MLS average has been growing, with the average attendance in 2016 being 21,692, a 57% increase over the 13,756 average in 2000. In my studies with this dataset, I want to look into how much a home field can change a game. Does the home team win more often? How much? Are more goals scored at home than away? Which team has the most home field advantage? Do any teams perform better away than home? Are ties affected by home field advantage at all? What types of factors could be causing the advantage / disadvantage? <jupyter_code>df = pd.read_csv('datasets/mls.csv') # lets take a look at what info this dataset has df.head() # what is the average score for each team? print('total goals:', df.totgoal.sum()) print('average goals per match:', df.totgoal.mean()) print('total Home goals:', df.hgoal.sum()) print('total Away goals:', df.vgoal.sum()) print('average Home goals scored:', df.hgoal.mean()) print('average Away goals scored:', df.vgoal.mean()) # how do the goals distribute plt.figure(figsize=(10, 5)) plt.hist(df['hgoal'], alpha=.75, label='Home Goals', color='c') plt.hist(df['vgoal'], alpha=.75, label='Away Goals', color='m') plt.xlabel('Number of Goals Scored') plt.legend() plt.show()<jupyter_output>total goals: 14197 average goals per match: 2.8422422422422424 total Home goals: 8325 total Away goals: 5874 average Home goals scored: 1.6666666666666667 average Away goals scored: 1.175975975975976 <jupyter_text>__As seen above, teams on average score more goals at home, albeit slightly.__ The more telling thing in the graph is that away teams are much more likely to not score at all and home teams are much more likely to score more than 1 goal. From here, it is clear that generally there is an advantage to the home team. With the average home goals being higher than the average away goals, you can come to the conclusion that generally the home team should score more goals than the away team. How this plays out in real life is of course a bit different, so we need to dig a bit deeper. From here, we should take a look into how many wins each team has at home vs how many wins each team has away. 
<jupyter_code># organize wins & losses home_wins = df.loc[df['hgoal'] > df['vgoal']] away_wins = df.loc[df['vgoal'] > df['hgoal']] ties = df.loc[df['vgoal'] == df['hgoal']] #show wins plt.figure(figsize=(25, 8)) home_wins.groupby('home').size().plot(kind='bar', ylim=(0, 240), color='c', width=.1, label='home wins', position=0, alpha=.75 ) away_wins.groupby('visitor').size().plot(kind='bar', ylim=(0, 240), color='m', width=.1, label='away wins', position=-1.25, alpha=.75) away_wins.groupby('home').size().plot(kind='bar', ylim=(0, 240), color='b', width=.1, label='home losses', position=-3, alpha=.75) home_wins.groupby('visitor').size().plot(kind='bar', ylim=(0, 240), color='r', width=.1, label='away losses', position=-4.25, alpha=.75) plt.xlabel('MLS Teams') plt.ylabel('Number of Wins') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>Okay, now that we've seen wins and losses, are ties affected by home field advantage at all?<jupyter_code>#check and see if ties seem to be affected by home / away plt.figure(figsize=(20, 5)) ties.groupby('home').size().plot(kind='bar', ylim=(0, 240), color='c', width=.2, label='home ties', alpha=.75) ties.groupby('visitor').size().plot(kind='bar', ylim=(0, 240), color='m', width=.2, label='away ties', position=-.75, alpha=.75) plt.xlabel('MLS Teams') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>There doesn't seem to be any immediate connection between ties and home field advantage. If anything, you could say that the teams who win more at home tend to get slightly more away ties than home ties. Home Wins vs Away Wins Based on the above graph, you can see that the home team clearly has an advantage and no team in MLS has performed better away than at home. Some teams have performed better away than others, but because there is a large variance in the number of games each team has played, it is hard to tell exactly how well each team has performed. The MLS has been consistently adding teams over the past 15 years and in 2002 dropped the Miami Fusion and Tampa Bay Mutiny from their roster. To get a true sense of how well each team has actually performed, we need to bring each of these records down to the same level. 
To do that, we need to calculate the percentage of wins a team gets both at home and away.<jupyter_code># New data frame to hold wins, losses, and calculate ratios win_ratio = pd.DataFrame(data=home_wins.groupby('home').size(), columns=['Home Wins']) win_ratio['Away Wins'] = away_wins.groupby('visitor').size() win_ratio['Home Losses'] = away_wins.groupby('home').size() win_ratio['Away Losses'] = home_wins.groupby('visitor').size() win_ratio['Home Ties'] = ties.groupby('home').size() win_ratio['Away Ties'] = ties.groupby('visitor').size() win_ratio['Home Win %'] = win_ratio['Home Wins']/(win_ratio['Home Wins']+win_ratio['Home Losses']+win_ratio['Home Ties']) win_ratio['Away Win %'] = win_ratio['Away Wins']/(win_ratio['Away Wins']+win_ratio['Away Losses']+win_ratio['Away Ties']) win_ratio['Total Win %'] = (win_ratio['Home Wins']+win_ratio['Away Wins'])/(win_ratio['Home Wins']+win_ratio['Home Losses']+win_ratio['Home Ties']+win_ratio['Away Wins']+win_ratio['Away Losses']+win_ratio['Away Ties']) win_ratio.sort_values('Home Win %', ascending=False) print('Average Home win %:', win_ratio['Home Win %'].mean()) print('Average Away win %:', win_ratio['Away Win %'].mean()) print('Average win %:', win_ratio['Total Win %'].mean()) plt.figure(figsize=(20, 6)) win_ratio['Home Win %'].plot(kind='bar', ylim=(0, 1), color='c', width=.1, label='Home Win %', position=0, alpha=.75) win_ratio['Away Win %'].plot(kind='bar', ylim=(0, 1), color='m', width=.1, label='Away Win %', position=-1.25, alpha=.75) win_ratio['Total Win %'].plot(kind='bar', ylim=(0, 1), color='#a5a5a5', width=.1, label='Total Win %', position=-2.5, alpha=.75) plt.axhline(win_ratio['Home Win %'].mean(), color='c', linestyle='dashed', linewidth=1, label='avg home win %', alpha=.25) plt.axhline(win_ratio['Away Win %'].mean(), color='m', linestyle='dashed', linewidth=1, label='avg away win %', alpha=.25) plt.axhline(win_ratio['Total Win %'].mean(), color='#a5a5a5', linestyle='dashed', linewidth=1, label='avg total win %', alpha=.25) plt.xlabel('MLS Teams') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>Win Percentages Based on the above graph, the trend of the LA Galaxy being the most consistent winners continues, but they do not walk away with the largest home win percentage. That honor goes to the Miami Fusion, who were disbanded in 2002. It makes you wonder whether that number would have stayed that high had they stayed in the league. When comparing percentages, the team that most interests me is the Montreal Impact. The Impact win 53% of their matches at home, good enough for 7th best. This is while having only a 17% away win percentage, which puts them at 2nd worst in that category. Does Location / Length of Travel Have an Effect on Win Percentages? Seeing how Montreal seemingly performs at a much higher level at home versus away leads me to wonder whether traveling all the way up to Canada puts visiting teams at a disadvantage. Major League Soccer is unique among global soccer leagues in how geographically spread out the teams are. The geographic area of the United States is much larger than that of any European country bar Russia. This forces teams to travel a much farther distance than a team playing in a European league might. 
To see if that puts teams at a disadvantage, we will see if teams hosting a team from the opposite conference leads to a higher win percentage than their overall home win percentage.<jupyter_code># Organize by conference west_host_east = df.loc[(df['hconf'] == 'West') & (df['vconf']=='East')] east_host_west = df.loc[(df['hconf'] == 'East') & (df['vconf']=='West')] # West host wins / losses west_host_east_home_wins = west_host_east.loc[west_host_east['hgoal'] > west_host_east['vgoal']] west_host_east_away_wins = west_host_east.loc[west_host_east['vgoal'] > west_host_east['hgoal']] west_host_east_ties = west_host_east.loc[west_host_east['vgoal'] == west_host_east['hgoal']] # East host wins / losses east_host_west_home_wins = east_host_west.loc[east_host_west['hgoal'] > east_host_west['vgoal']] east_host_west_away_wins = east_host_west.loc[east_host_west['vgoal'] > east_host_west['hgoal']] east_host_west_ties = east_host_west.loc[east_host_west['vgoal'] == east_host_west['hgoal']] #Get the win percentages west_host_east_per = west_host_east_home_wins.groupby('home').size() / (west_host_east_home_wins.groupby('home').size()+west_host_east_away_wins.groupby('home').size()+west_host_east_ties.groupby('home').size()) east_host_west_per = east_host_west_home_wins.groupby('home').size() / (east_host_west_home_wins.groupby('home').size()+east_host_west_away_wins.groupby('home').size()+east_host_west_ties.groupby('home').size()) plt.figure(figsize=(20, 6)) plt.subplot(1,2,1) west_host_east_per.plot(kind='bar', ylim=(0, 1), color='c', label='Home Win % vs West', alpha=.75) plt.axhline(win_ratio['Home Win %'].mean(), color='c', linestyle='dashed', linewidth=1, label='Overall avg home win % (full league)', alpha=.25) plt.title('Western Conference Hosting Eastern Conference') plt.xlabel('MLS Teams') plt.legend() plt.subplot(1,2,2) east_host_west_per.plot(kind='bar', ylim=(0, 1), color='m', label='Home Win % vs East', alpha=.75) plt.axhline(win_ratio['Home Win %'].mean(), color='m', linestyle='dashed', linewidth=1, label='Overall avg home win % (full league)', alpha=.25) plt.title('Eastern Conference Hosting Western Conference') plt.xlabel('MLS Teams') plt.legend() plt.show() print()<jupyter_output><empty_output>
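One way to sharpen "does the home team win more often, and by how much?" is to test the share of decisive (non-tied) matches won by the host against a 50/50 null. The sketch below uses placeholder counts rather than the actual totals from the `home_wins` / `away_wins` frames above, and a plain normal approximation rather than an exact binomial test.

```python
import numpy as np

# Placeholder counts of decisive games (ties excluded); swap in
# len(home_wins) and len(away_wins) from the notebook for the real figures.
home_wins_n, away_wins_n = 2600, 1500
n_decisive = home_wins_n + away_wins_n

p_home = home_wins_n / n_decisive          # share of decisive games won by the host
se = np.sqrt(0.25 / n_decisive)            # standard error under the p = 0.5 null
z = (p_home - 0.5) / se                    # normal-approximation z statistic

print(f"Home side wins {p_home:.1%} of decisive matches (z = {z:.1f} vs. a 50/50 null)")
```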
no_license
/thinkful - intro - Capstone.ipynb
tcbriggs981/Drills-and-Challenges
5
<jupyter_start><jupyter_text># Collecting Naver movie review comments -- Movie main page https://movie.naver.com/movie/bi/mi/basic.nhn?code=161967 -- Comment list https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=161967&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=1 https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=161967&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=2 ## Quick test<jupyter_code>import requests from bs4 import BeautifulSoup<jupyter_output><empty_output><jupyter_text>### Paging through the review list<jupyter_code>movieid = 161967 ## Parasite total_count = 36754 ## total size=19,653 # https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=161967&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=2 urlfit = 'https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code='+str(movieid)+'&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false' for i in range(1, int(total_count / 10) + 1): url = urlfit + '&page=' + str(i) print(url)<jupyter_output>https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=1 https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=2 https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=3 https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=4 https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=5 https://movie.naver.com/movie/bi/mi/pointWriteForm[...]<jupyter_text>### Details of an individual review<jupyter_code>## Full HTML of the page test_url = "https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=107924&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=1" resp = requests.get(test_url) html = BeautifulSoup(resp.content, 'html.parser') html ## Full first comment # /html/body/div/div/div[5]/ul/li[1] --> entire first comment # /html/body/div/div/div[5]/ul/li[2] --> entire second comment score_result = html.find('div', {'class': 'score_result'}) lis = score_result.findAll('li') lis[0] # /html/body/div/div/div[5]/ul/li[2] ## Full second comment score_result = html.find('div', {'class': 'score_result'}) lis = score_result.findAll('li') lis[1] # /html/body/div/div/div[5]/ul/li[1]/div[2]/p/span/text() ## Comment text review_text = lis[0].find('p').find('span').getText() review_text # /html/body/div/div/div[5]/ul/li[1]/div[2]/p/span/text() ## Comment text -- stripped clean of stray whitespace! review_text = lis[0].find('p').find('span').getText().strip() review_text # /html/body/div/div/div[5]/ul/li[1]/div[1]/em ## Rating score score = lis[0].find('em').getText() score #/html/body/div/div/div[5]/ul/li[1]/div[3]/a[1]/strong --> likes 
# /html/body/div/div/div[5]/ul/li[1]/div[3]/a[2]/strong --> dislikes ## Like/dislike counts like = lis[0].find('div', {'class': 'btn_area'}).findAll('strong')[0].getText() dislike = lis[0].find('div', {'class': 'btn_area'}).findAll('strong')[1].getText() like, dislike # /html/body/div/div/div[5]/ul/li[1]/div[2]/dl/dt/em[1]/a/span ## Commenter nickname nickname = lis[0].findAll('a')[0].find('span').getText() nickname # /html/body/div/div/div[5]/ul/li[1]/div[2]/dl/dt/em[2] from datetime import datetime created_at = datetime.strptime(lis[0].find('dt').findAll('em')[1].getText(), "%Y.%m.%d %H:%M") created_at<jupyter_output><empty_output><jupyter_text>## The full run<jupyter_code># Import the required libraries import requests from bs4 import BeautifulSoup from datetime import datetime import pandas as pd ## Data collection movieid = 189069 total_count = 20765 ## total size=20765 urlfit = 'https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code='+str(movieid)+'&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false' segment=[] for i in range(1, int(total_count / 10) + 1): url = urlfit + '&page=' + str(i) print('parsing page ' + str(i) + '....') resp = requests.get(url) html = BeautifulSoup(resp.content, 'html.parser') score_result = html.find('div', {'class': 'score_result'}) lis = score_result.findAll('li') for li in lis: try: watch_movie = li.find('span', {'class':'ico_viewer'}).extract().getText() except: watch_movie = 0 nickname = li.findAll('em')[1].find('a').find('span').getText() created_at = datetime.strptime(li.find('dt').findAll('em')[1].getText(), "%Y.%m.%d %H:%M") review_text = li.find('p').find('span').getText().strip() score = li.find('em').getText() btn_likes = li.find('div', {'class': 'btn_area'}).findAll('strong') like = btn_likes[0].getText() dislike = btn_likes[1].getText() fullinfo = [nickname, review_text, score, like, dislike, created_at, watch_movie] segment.append(fullinfo) ## Check the collected data segment # Convert the results to a DataFrame moviere = pd.DataFrame(segment) moviere ## Rename the columns moviere.columns = ["nickname","text","score","like","dislike","time","watch_movie"] moviere ## Save to CSV moviere.to_csv('moviere.csv', header=True, encoding='utf-8')<jupyter_output><empty_output>
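Once `moviere.csv` has been written, a quick way to confirm the scrape worked is to read it back and summarize the ratings. This is a minimal sketch that assumes the file and column names produced by the cells above; when re-running the scraper itself, adding a short `time.sleep()` between page requests is also a good idea.

```python
import pandas as pd

# Read back the file saved above; the first column is the old index
reviews = pd.read_csv('moviere.csv', index_col=0)
reviews['score'] = pd.to_numeric(reviews['score'], errors='coerce')

print(len(reviews), "reviews collected")
print("mean rating:", round(reviews['score'].mean(), 2))
print(reviews['score'].value_counts().sort_index())  # distribution of 1-10 ratings
```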
permissive
/다만_악에서_구하소서_리뷰_스크래핑.ipynb
minyeseul201701330/movie-review-analysis
4
<jupyter_start><jupyter_text>## Classification of Yelp Review text using CountVectroizer, TF-IDF,RandomForest, Navie Bayes MultinomialNB<jupyter_code>import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns from sklearn.metrics import classification_report,confusion_matrix from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_validate from sklearn.model_selection import cross_val_score<jupyter_output><empty_output><jupyter_text>### Load Data and Check sample contents <jupyter_code>data=pd.read_csv('yelp.csv') data.tail() data['text'].values[5]<jupyter_output><empty_output><jupyter_text>### Use some basic functions to view the type of data,min,max values etc for Numerical cloumns <jupyter_code>data.info() data.describe()<jupyter_output><empty_output><jupyter_text>### Check the distribution of number of reviewes and the corresponding ratings <jupyter_code>data['stars'].hist() sns.countplot(data=data,x='stars')<jupyter_output><empty_output><jupyter_text>#### Observation:The number of records for 4 and 5 rating are large compared to 1 and 2 ratings from above Histogram### Create a new column length to store the length of each review text.<jupyter_code>#create a new column length to store the len of the review text data['length']=data['text'].apply(len) #explore funny column data['funny'].unique()<jupyter_output><empty_output><jupyter_text>### Let see how the lenght of each text is distributed <jupyter_code>sns.distplot(data['length'],kde=False)<jupyter_output><empty_output><jupyter_text>#### We observe majority of the reviews hav text length around 1000 and there are some outliers with large lenght of text### Lets plot a scatterplot to see the relation of Lenght to the ratings column<jupyter_code>sns.scatterplot(data=data,x='length',y=data['stars'],hue=data['stars'],palette='coolwarm',legend='full') sns.boxplot(data=data,x='stars',y='length')<jupyter_output><empty_output><jupyter_text>#### Observation: we see the length of each rating is almost equally distributed and each rating has out liers ### Check the corelation of data and plot a heat map to visualize<jupyter_code>data.corr() sns.heatmap(data.corr(),cmap='coolwarm',annot=True)<jupyter_output><empty_output><jupyter_text>#### Observation: We see usevul and cool are highly co-releated as may of the reviews that are useful may have cool votings<jupyter_code>#Lets extract stars with rating 1 and rating 5 BOW=data[(data['stars']==5 )| (data['stars']==1)] print("Number of Revies with rating 1 or 5 are:",(len(BOW['stars']))) #Lets check the content of a review data[data['stars']==1]['text'].values[0]<jupyter_output><empty_output><jupyter_text>### Seperate Dependent and Independent Features for each classification model <jupyter_code>X_NB=BOW['text'] y_NB=BOW['stars'] X_RF=BOW['text'] y_RF=BOW['stars'] X_CV=BOW['text'] y_CV=BOW['stars'] X_KN=BOW['text'] y_KN=BOW['stars']<jupyter_output><empty_output><jupyter_text>### Samples to compare BOW to CountVectrozer - Toy example<jupyter_code> v1=CountVectorizer() X=v1.fit_transform(["Hi There! did u run!I told. Running is like eating healthy diet. 
DO you Agreee what I said"]) print("CountVectorizer Internal analyzer: \n",v1.get_feature_names()) print(X.toarray()) print(X.get_shape()) v2=CountVectorizer(analyzer=clean_review_text) X=v2.fit_transform(["Hi There! How? are! I told. Running is as good as eating healthy diet. DO you Agreee what I said"]) print("Custom Analyzer: \n",v2.get_feature_names()) print(X.toarray()) print(X.get_shape()) #Example to show how stemmer works: #Stemmers remove morphological affixes from words, leaving only the word stem. from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("english") print("Sample Stemmer for running is : ",stemmer.stem("running"))<jupyter_output>CountVectorizer Internal analyzer: ['agreee', 'did', 'diet', 'do', 'eating', 'healthy', 'hi', 'is', 'like', 'run', 'running', 'said', 'there', 'told', 'what', 'you'] [[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]] (1, 16) Custom Analyzer: ['agree', 'diet', 'eat', 'good', 'healthi', 'hi', 'run', 'said', 'told'] [[1 1 1 1 1 1 1 1 1]] (1, 9) Sample Stemmer for running is : run <jupyter_text>### Build Custom Analyzer<jupyter_code>import string import nltk from nltk.stem.snowball import SnowballStemmer from nltk.corpus import stopwords def clean_review_text(review): """ 1. Remove Punctuation 2. Remove Stop Words 3. Apply SnowBall Stemmer to remove morphological affixes from words, leaving only the word stem. """ stemmer = SnowballStemmer("english") no_punc=[c for c in review if c not in string.punctuation] no_punc=''.join(no_punc) return [stemmer.stem(word) for word in no_punc.split() if word.lower() not in stopwords.words('english')] <jupyter_output><empty_output><jupyter_text>### Build a pipeline with countvectroizer,TFIDF Transformer and Navive_Bayes MultinomialNB<jupyter_code> from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split X_train_NB, X_test_NB, y_train_NB, y_test_NB = train_test_split(X_NB,y_NB, test_size=0.3, random_state=101) pipeline_NB=Pipeline([('countvec',CountVectorizer(analyzer=clean_review_text)), ('tfidf',TfidfTransformer()), ('algorithm',MultinomialNB())]) pipeline_NB.fit(X_train_NB,y_train_NB) predictions_NB=pipeline_NB.predict(X_test_NB) #Analyze Results print(confusion_matrix(y_test_NB,predictions_NB)) print(classification_report(y_test_NB,predictions_NB))<jupyter_output>[[ 0 228] [ 0 998]] precision recall f1-score support 1 0.00 0.00 0.00 228 5 0.81 1.00 0.90 998 accuracy 0.81 1226 macro avg 0.41 0.50 0.45 1226 weighted avg 0.66 0.81 0.73 1226 <jupyter_text>### Build a pipeline with countvectroizer,TFIDF Transformer and RandomForest Classifier to compare Results<jupyter_code>from sklearn.ensemble import RandomForestClassifier pipeline_RF=Pipeline([('countvec',CountVectorizer(analyzer=clean_review_text)), ('tfidf',TfidfTransformer()), ('algorithm',RandomForestClassifier())]) from sklearn.model_selection import train_test_split X_train_RF, X_test_RF, y_train_RF, y_test_RF = train_test_split(X_RF,y_RF, test_size=0.3, random_state=101) pipeline_RF.fit(X_train_RF,y_train_RF) predictions_RF=pipeline_RF.predict(X_test_RF) #Analyze Results print(confusion_matrix(y_test_RF,predictions_RF)) print(classification_report(y_test_RF,predictions_RF)) <jupyter_output>[[ 89 139] [ 2 996]] precision recall f1-score support 1 0.98 0.39 0.56 228 5 0.88 1.00 0.93 998 accuracy 0.88 1226 macro avg 0.93 0.69 0.75 1226 weighted avg 0.90 
0.88 0.86 1226 <jupyter_text>### Analysis #### we see the results form Random Forest Classifier are better compared to NaiveBayes classifier in this case### We observe that the precision is low . Lets try to remove the TF-IDF and try to predict the results<jupyter_code>pipeline_CV=Pipeline([('countvec',CountVectorizer(analyzer=clean_review_text)), ('algorithm',MultinomialNB())]) X_train_CV, X_test_CV, y_train_CV, y_test_CV = train_test_split(X_CV,y_CV, test_size=0.3, random_state=101) pipeline_CV.fit(X_train_CV,y_train_CV) predictions_CV=pipeline_CV.predict(X_test_CV) #Analyze Results print(confusion_matrix(y_test_CV,predictions_CV)) print(classification_report(y_test_CV,predictions_CV)) import numpy as np tot_score=np.zeros(13) print(len(tot_score))<jupyter_output>13 <jupyter_text>### Implement KNN <jupyter_code>X_KN=BOW['text'] y_KN=BOW['stars'] #Take input text and clean punctuations , stop words etc with custom analyzer cvo=CountVectorizer(analyzer=clean_review_text) #Replace X with the cleaned text X_KN=cvo.fit_transform(X_KN) #len(y_train_KN) X_train_KN.shape X_train_KN, X_test_KN, y_train_KN, y_test_KN=train_test_split(X_KN,y_KN,test_size=0.2, random_state=101) #lets keep the neighbours odd number form 1 to 25 n_neigh=[i for i in range(26) if i%2!=0] cv_scores=[] #For each k value #1.Split the same training data with 10 fold and try to find the best K vlue .It returs list of scores. #2.compute the mean score for each this neighbor value and store it for i in n_neigh: model=KNeighborsClassifier(n_neighbors=i,weights='distance') scores = cross_val_score(model, X_train_KN, y_train_KN, cv=10, scoring='accuracy') cv_scores.append(scores.mean()) #Compute the Error for each k value and print the kvalue that has minimum error MSE=[1-scr for scr in cv_scores] print("Errors are:",MSE) print("Least Error is :", min(MSE)) print("k value to use is: ",n_neigh[MSE.index(min(MSE))]) model=KNeighborsClassifier(n_neighbors=5,weights='distance') model.fit(X_train_KN, y_train_KN) predictions_KN=model.predict(X_test_KN) #Analyze Results print(confusion_matrix(y_test_KN,predictions_KN)) print(classification_report(y_test_KN,predictions_KN)) print(accuracy_score(y_test_KN,predictions_KN)) #Lets check the result with TF-IDF and KNN pipeline_KN=Pipeline([('countvec',CountVectorizer(analyzer=clean_review_text)), ('tfidf',TfidfTransformer()), ('algorithm',KNeighborsClassifier(n_neighbors=5))]) X_train_KN, X_test_KN, y_train_KN, y_test_KN=train_test_split(X_KN,y_KN,test_size=0.2, random_state=101) pipeline_KN.fit(X_train_KN,y_train_KN) predictions_KN=pipeline_KN.predict(X_test_KN) #Analyze Results print(confusion_matrix(y_test_KN,predictions_KN)) print(classification_report(y_test_KN,predictions_KN)) print(accuracy_score(y_test_KN,predictions_KN))<jupyter_output>[[ 50 100] [ 16 652]] precision recall f1-score support 1 0.76 0.33 0.46 150 5 0.87 0.98 0.92 668 accuracy 0.86 818 macro avg 0.81 0.65 0.69 818 weighted avg 0.85 0.86 0.83 818 0.8581907090464548
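A more compact way to run this kind of comparison is to let `TfidfVectorizer` fuse the CountVectorizer and TfidfTransformer steps and score each classifier with cross-validation on a class-balance-aware metric, since the 1-star class is heavily outnumbered here (228 vs 998) and plain accuracy hides the all-5s failure seen for Naive Bayes above. The sketch below is hedged: it runs on a tiny made-up corpus and adds a logistic-regression baseline that is not part of the notebook; swap in `BOW['text']` and `BOW['stars']` (and a larger `cv`) to reproduce the real comparison.

```python
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Toy corpus standing in for BOW['text'] / BOW['stars']
texts = ["terrible food, never again", "amazing service and great food",
         "awful experience", "loved it, five stars", "worst place ever", "fantastic"]
labels = [1, 5, 1, 5, 1, 5]

for name, clf in [("MultinomialNB", MultinomialNB()),
                  ("LogisticRegression", LogisticRegression(max_iter=1000))]:
    pipe = Pipeline([("tfidf", TfidfVectorizer(stop_words="english")),
                     ("clf", clf)])
    # f1_macro weights both classes equally, which matters with 998 vs 228 reviews
    scores = cross_val_score(pipe, texts, labels, cv=2, scoring="f1_macro")
    print(name, scores.mean().round(3))
```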
no_license
/CountVectorizer/Yelp_Classification_CountVectorizer_TFIDF.ipynb
hkolgur/NLP
16
<jupyter_start><jupyter_text># Pivot Tables with Pandas - Lab ## Introduction In this lab, we'll learn how to make use of our newfound knowledge of pivot tables to work with real-world data. We'll start by exploring ## Objectives You will be able to: * Understand and explain what a multi-level hierarchical index is * Understand, explain the difference and use df.pivot and pd.pivot_table * Switch between “long” and “wide” in a DataFrame using stack() and unstack() * Transform “wide” to “long” DataFrames using `melt`## Getting Started ### Import Pandas and Matplotlib.pyplot Using Standard Aliases In the cell below: * Import `pandas` and set the standard alias * Import `matplotlib.pyplot` and set the standard alias * Run the ipython magic command to display matplotlib graphs inline within the notebook<jupyter_code>import pandas as pd import matplotlib.pyplot as plt %matplotlib inline<jupyter_output><empty_output><jupyter_text>## Load the Data The data for this activity is stored in a file called `'causes_of_death.tsv'` which is a somewhat morbid dataset from the center for disease control. Note that the file extension .tsv indicates that this data is formatted slightly differently then the standard .csv, the difference being that it has 'tab seperated values' instead of 'comma seperated values'. As such, pass in the optional parameter `delimiter='\t'` into the `pd.read_csv()` method.<jupyter_code>df = pd.read_csv('causes_of_death.tsv', delimiter = '\t')<jupyter_output><empty_output><jupyter_text>Now, display the head of the DataFrame to ensure everything loaded correctly.<jupyter_code>df.head()<jupyter_output><empty_output><jupyter_text>Our data is currently in **_Wide_** format. We can tidy this up by converting it to **_Long_** format by using groupby statements to aggregate our data into a much neater, more readable format. # Groupby Aggregations Complete the following groupby statements.# 1) Groupby State and Gender. Sum the values.<jupyter_code># Your code here df.groupby(by = ['State', 'Gender']).sum().head()<jupyter_output><empty_output><jupyter_text># 2) Groupby State and Gender and Race. Find the average values.<jupyter_code># Your code here df.groupby(by = ['State', 'Gender', 'Race']).mean().head(15)<jupyter_output><empty_output><jupyter_text># 3) Groupby Gender and Race. Find the minimum values.<jupyter_code># Your code here df.groupby(by = ['Gender', 'Race']).min().head(15)<jupyter_output><empty_output><jupyter_text>## 4) Create a bar chart of the total number of deaths by state. * Sort your columns in order (ascending or descending are both acceptable). * Also make sure to include a title, axes labels and have your graph be an appropriate size. **_NOTE:_** In order to do this, slice the `Deaths` column after the `.groupby()` method, but before the `sum()` method. You can even chain the `.plot()` call on after the `sum()` call and do this all on one line, excluding the labeling of the graph!<jupyter_code>#Your code here states = list(df.groupby(by = ['State']).sum().index.values) states = sorted(states, reverse = False) filtered = df.groupby(by = ['State']).sum() values = list(filtered['Deaths']) values plt.figure(figsize = (15,15)) plt.bar(states,values, label = 'Deaths by State') plt.legend() plt.title('Deaths by state') plt.xlabel('State') plt.ylabel('Deaths') plt.show()<jupyter_output><empty_output><jupyter_text>### Inspecting our Data Let's go one step further and get a print-out of the data type of each column. 
In the cell below, get the `.info()` of our DataFrame, and note the data type that each column is currently stored as. <jupyter_code>df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 4115 entries, 0 to 4114 Data columns (total 12 columns): Notes 0 non-null float64 State 4115 non-null object State Code 4115 non-null int64 Ten-Year Age Groups 4115 non-null object Ten-Year Age Groups Code 4115 non-null object Gender 4115 non-null object Gender Code 4115 non-null object Race 4115 non-null object Race Code 4115 non-null object Deaths 4115 non-null int64 Population 4115 non-null object Crude Rate 4115 non-null object dtypes: float64(1), int64(2), object(9) memory usage: 385.9+ KB <jupyter_text>Let's look at some samples from the Population column to see if the current encoding seems appropriate for the data it contains. In the cell below, display the population values for the first 5 rows in the DataFrame. <jupyter_code>df['Population'].head(5)<jupyter_output><empty_output><jupyter_text>Just to be extra sure, let's check the value counts to see how many times each unqiue value shows up in the dataset. We'll only look at the top 5. In the cell below, print out the top 5 `value_counts()` of the population column of the DataFrame. <jupyter_code>df['Population'].value_counts().head(5)<jupyter_output><empty_output><jupyter_text>Clearly, this data should be stored as a numeric type, not a categorical type. ### 5a) Reformat the Population Column as an Integer As stands, not all values will be able to be reformated as integers. Most of the cells in the the `Population` column contain integer values, but the entire column is currently encoded in string format because some cells contain the string `"Not Applicable"`. We need to remove these rows before we can cast the Population column to an Integer data type. In the cell below: * Slice the rows of `df` where the Population column is equal to `'Not Applicable'`. * Use `to_drop.index` to drop the offending rows from `df`. Be sure to set the `axis=0`, and `inplace=True` * Cast the Population column to an integer data type using the `.astype()` function, with the single parameter `int64` passed in. * Print the Population column's `dtype` attribute to confirm it is now stored in `int64` format. **_NOTE:_** `.astype()` returns a copy of the column, so make sure you set the Population column equal to what this method returns--don't just call it!<jupyter_code>#Your code here criterion = df.Population.map(lambda x: x == "Not Applicable") to_drop = df[criterion] df.drop(axis = 0, index = to_drop.index, inplace = True) df['Population'] = df['Population'].astype('int64') df.Population.dtype df.head() <jupyter_output><empty_output><jupyter_text>### 5b) Complete the Bar Chart Now that we've reformatted our data, let's create a bar chart of the of the Mean Population by State.<jupyter_code>#Your code here df1 = df.groupby(by = ['State']).mean() states = list(df1.index) mean_pops = list(df1['Population']) plt.figure(figsize = (15,15)) plt.bar(states, mean_pops, label = ('mean population')) plt.legend() plt.title('Mean Population by State') plt.xlabel('State') plt.ylabel('Mean Population') plt.show()<jupyter_output><empty_output><jupyter_text>Below we will investigate how we can combine the **pivot** method along with the **groupby** method to combine some cool **stacked bar charts**! ### Using Aggregate Functions In the cell below: * Group `df` by `'State'` and `'Gender'`, and then slice both `'Death'` and `'Population'` from it. 
Chain the `.agg()` method to return the mean, min, max, and standard deviation these sliced columns. **_NOTE:_** This only requires one line of code. By now, you've probably caught on that the code required to do this follows this pattern: `([things to group by])[columns to slice].agg([aggregates to return])` Then, display the head of this of this new DataFrame.<jupyter_code># A sample groupby similar to above. grouped = df.groupby(by = ['State', 'Gender'])['Deaths', 'Population'].agg(['mean', 'min', 'max', 'std']) grouped.head() <jupyter_output><empty_output><jupyter_text>Note how Pandas denotes a multi-hierarchical index in the DataFrame above. Let's inspect how a multi-hierarchical index is actually stored. In the cell below, display the `index` attribute of this DataFrame. <jupyter_code>grouped.index<jupyter_output><empty_output><jupyter_text>A two-dimensional array denotes the multiple levels, with each possible combination being a row in our `grouped` DataFrame. Let's reset the index, and then see how it changes. In the cell below, call the DataFrame's `reset_index()` function. Then, display the head of the DataFrame. <jupyter_code># First, reset the index. Notice the subtle difference; State and Gender are now columns rather then the index. grouped = grouped.reset_index() grouped.head()<jupyter_output><empty_output><jupyter_text>Note how the way the index is displayed has changed. The index columns that made up the multi-hierarchical index before are now stored as columns of data, with each row given a more traditional numerical index. Let's confirm this by reexamining the `index` attribute of `grouped` in the cell below.<jupyter_code>grouped.index<jupyter_output><empty_output><jupyter_text>However, look again at the displayed DataFrame--specifically, the columns. Resetting the index has caused the DataFrame to use a mutlti-indexed structure for the columns. In the cell below, examine the `columns` attribute of `grouped` to confirm this. <jupyter_code>#Notice that this causes columns to be MultiIndexed! grouped.columns<jupyter_output><empty_output><jupyter_text>#### Column Levels Since we're working with miulti-hierarchical indices, we can examine the indices available at each level. In the cell below, use the `get_level_values` method contained within the DataFrame's `columns` object to get the values for the outermost layer of the index. <jupyter_code>grouped.columns.get_level_values<jupyter_output><empty_output><jupyter_text>Now, get the level values for the inner layer of the index. <jupyter_code>grouped.columns.get_level_values(0)<jupyter_output><empty_output><jupyter_text>### Flattening the DataFrame We can also **_flatten_** the DataFrame from a multi-hierarchical index to more traditional one-dimensional index. We do this by creating each unique combination possible of every level of the multi-hierarchical index. Since this is a complex task, you do not need to write it--but take some time to examine the code in the cell below and see if you can understand how it works! <jupyter_code>#We could also flatten these: cols0 = grouped.columns.get_level_values(0) cols1 = grouped.columns.get_level_values(1) grouped.columns = [col0 + '_' + col1 if col1 != '' else col0 for col0, col1 in list(zip(cols0, cols1))] #The list comprehension above is more complicated then what we need but creates a nicer formatting and #demonstrates using a conditional within a list comprehension. 
#This simpler version works but has some tail underscores where col1 is blank: #grouped.columns = [col0 + '_' + col1 for col0, col1 in list(zip(cols0, cols1))] grouped.columns<jupyter_output><empty_output><jupyter_text>Now that we've flattened the DataFrame, let's inspect a couple rows to see what it looks like. In the cell below, inspect the head of the `grouped` DataFrame. <jupyter_code>grouped.head()<jupyter_output><empty_output><jupyter_text>## Using Pivots Now, we'll gain some practice using the DataFrame class's built-in `.pivot()` method. In the cell below, call the DataFrame's pivot method with the following parameters: * index = `'State'` * columns = `'Gender'` * values = `'Deaths_mean'` Then, display the head of our new `pivot` DataFrame to see what it looks like. <jupyter_code># Now it's time to pivot! pivot = grouped.pivot(index = 'State', columns = 'Gender', values = 'Deaths_mean') pivot.head() <jupyter_output><empty_output><jupyter_text>Great! We've just created a pivot table. Let's reset the index and see how it changes our pivot table. In the cell below, reset the index of the `pivot` object as we did previously. Then, display the head of the object to see if we can detect any changes.<jupyter_code># Again, notice the subtle difference of reseting the index: pivot.reset_index(inplace = True) pivot.head()<jupyter_output><empty_output><jupyter_text>### Visualizing Data With Pivot Tables Now, we'll make use of our newly created pivot table to quickly create some visualizations of our data. In the cell below, call `pivot.plot()` with the following parameters: * kind = `'barh'` * figsize = `(15,8)`<jupyter_code># Now let's make a sweet bar chart!! pivot.plot(kind = 'barh', figsize = (15,8))<jupyter_output><empty_output><jupyter_text>Notice the Y-axis is currently just a list of numbers. That's because when we reset the index, it defaulted to assigning integers as the index for the DataFrame. Let's set the index back to `'State'`, and then recreate the visualization. In the cell below: * Use the `pivot` object's `set_index()` method and set the index to `'State'`. Then, chain this with a `.plot()` call to recreate the visualization using the code we used in the cell above. All the code in this cell should be done in a single line. Just call the methods--do not rebind `pivot` to be equal to this line of code. <jupyter_code>#Where's the states?! Notice the y-axis is just a list of numbers. #This is populated by the DataFrame's index. #When we used the .reset_index() method, we created a new numbered index to name each row. #Let's fix that by making state the index again. pivot.set_index('State').plot(kind = 'barh', figsize = (15,8))<jupyter_output><empty_output><jupyter_text>Now, that we've created a visualization with the states as the y-axis, let's print out the head of the `pivot` object again. <jupyter_code># Also notice that if we call the DataFrame pivot again, state is not it's index. #The above method returned a DataFrame with State as index and we plotted it, #but it did not update the DataFrame itself. pivot.index<jupyter_output><empty_output><jupyter_text>Note that the index has not changed. That's because the code we wrote when we set the index to the 'State' column returns a copy of the DataFrame object with the index set to 'State'--by default, it does not mutate original `pivot` object. If we want to do that, we'll need to capture the new object returned by updating the contents of the `pivot` variable. In the cell below, set the index of `pivot` to `'State'`. 
Then, recreate the bar plot using this new object. <jupyter_code>#If we wanted to more permanently change the index we would set it first and then plot: pivot.set_index('State', inplace = True) <jupyter_output><empty_output><jupyter_text>Again, let's check the head of the DataFrame to confirm that the index structure has changed. <jupyter_code>pivot.index<jupyter_output><empty_output><jupyter_text>Finally, let's stack these bar charts to see how that looks. In the cell below, recreate the visualization we did in the cell above, but this time, also pass in `stacked=True` as a parameter. <jupyter_code># Lastly, let's stack each of these bars for each state. #Notice we don't have to worry about index here, because we've already set it above. pivot.plot(kind = 'barh', figsize = (15,8), stacked = True)<jupyter_output><empty_output><jupyter_text> ## Stacking and Unstacking DataFrames Now, let's get some practice stacking and unstacking DataFrames. ### Stacking In the cell below, let's display the head of `grouped` to remind ourselves of the format we left it in. <jupyter_code>grouped.head()<jupyter_output><empty_output><jupyter_text>As we can see above, `grouped` is currently in a flattened format, with no hierarchical structure to it's indices. In the cell below, call the `grouped` DataFrame's `.stack()` method. <jupyter_code>grouped.stack()<jupyter_output><empty_output><jupyter_text>As we can see, the `stack()` method has stacked our DataFrame from a flattened format into one with a multi-hierarchical index! This is an easy, quick way to aggregate our data. ### Unstacking Now, we'll explore unstacking with the `pivot` DataFrame, which is already stacked into a pivot table. In the cell below, set unstack `pivot` using the object's `.unstack()` method. Then, display the object to see how it has changed. <jupyter_code>pivot = pivot.unstack() pivot.head()<jupyter_output><empty_output><jupyter_text>Note that it has unstacked the multi-hierarchical structure of the `pivot` DataFrame by one level. Let's call it one more time and display the results! In the cell below, set pivot equal to `pivot.unstack()` again, and then display the `pivot` object to see how things have changed.<jupyter_code>pivot = pivot.unstack() pivot.head()<jupyter_output><empty_output>
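The lab's objectives also mention `pd.pivot_table()` and `melt()`, which the exercises above only approach indirectly through `groupby` and `pivot`. The sketch below rounds out the picture on a tiny made-up frame (not the CDC data): `pivot_table` aggregates while reshaping, so it tolerates duplicate index/column pairs where `df.pivot()` would raise, and `melt` is the move back from wide to long.

```python
import pandas as pd

# Small synthetic frame in "long" format, standing in for the CDC data
long_df = pd.DataFrame({
    "State": ["AL", "AL", "AK", "AK"],
    "Gender": ["Female", "Male", "Female", "Male"],
    "Deaths": [120, 150, 30, 45],
})

# pivot_table pivots and aggregates in one step (the aggregation is trivial here)
wide = pd.pivot_table(long_df, index="State", columns="Gender",
                      values="Deaths", aggfunc="sum")
print(wide)

# melt is the inverse move: back from "wide" to "long"
tidy = wide.reset_index().melt(id_vars="State", value_name="Deaths")
print(tidy)
```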
non_permissive
/index.ipynb
dgoldstein24/dsc-0-04-12-pivot-tables-lab-online-ds-pt-100118
33
<jupyter_start><jupyter_text># Generating Digits using a Variational AutoEncoder## Creating A Variational AutoEncoder### Import Necessary Libraries<jupyter_code>import tensorflow as tf import tensorflow.keras.backend as K from tensorflow.keras.layers import Dense, Input from tensorflow.keras.models import Model<jupyter_output><empty_output><jupyter_text>### Define Paramaters And A Sampling Layer<jupyter_code>num_units_hidden_1 = 500 num_units_hidden_2 = 500 num_units_hidden_3 = 20 num_units_hidden_4 = num_units_hidden_2 num_units_hidden_5 = num_units_hidden_1 initializer = tf.keras.initializers.VarianceScaling() params = { 'activation': 'elu', 'kernel_initializer': initializer } class Sampling(tf.keras.layers.Layer): def call(self, inputs): mean, gamma = inputs return K.random_normal(tf.shape(gamma)) * K.exp(gamma / 2) + mean<jupyter_output><empty_output><jupyter_text>### Define Encoder Model<jupyter_code>encoder_input_layer = Input(shape = (28 * 28, )) hidden_1 = Dense(num_units_hidden_1, **params)(encoder_input_layer) hidden_2 = Dense(num_units_hidden_2, **params)(hidden_1) mean = Dense(num_units_hidden_3, activation = None)(hidden_2) gamma = Dense(num_units_hidden_3, activation = None)(hidden_2) codings = Sampling()([mean, gamma]) encoder = Model(inputs = [encoder_input_layer], outputs = [mean, gamma, codings])<jupyter_output><empty_output><jupyter_text>### Define Decoder Model<jupyter_code>decoder_input_layer = Input(shape = (20, )) hidden_3 = Dense(num_units_hidden_4, **params)(decoder_input_layer) hidden_4 = Dense(num_units_hidden_5, **params)(hidden_3) output_layer = Dense(28 * 28, activation = 'sigmoid')(hidden_4) decoder = Model(inputs = [decoder_input_layer], outputs = [output_layer])<jupyter_output><empty_output><jupyter_text>### Define Variational Autoencoder From Encoders And Decoders<jupyter_code>_, _, encodings = encoder(encoder_input_layer) reconstructions = decoder(encodings) vae_model = Model(inputs = [encoder_input_layer], outputs = [reconstructions])<jupyter_output><empty_output><jupyter_text>### Define The Loss Function, And Compile The Model With An Adam Optimizer<jupyter_code>latent_loss = -0.5 * K.sum(1 + gamma - K.exp(gamma) - K.square(mean), axis = -1) vae_model.add_loss(K.mean(latent_loss) / 784.) 
optimizer = tf.keras.optimizers.Adam(learning_rate = 0.001) vae_model.compile(optimizer = optimizer, loss = 'binary_crossentropy')<jupyter_output><empty_output><jupyter_text>## MNIST dataset<jupyter_code>import matplotlib.pyplot as plt import numpy as np mnist_dataset = tf.keras.datasets.mnist (train_images, train_labels), (test_images, test_labels) = mnist_dataset.load_data() train_images, test_images = train_images / 255, test_images / 255 fig, axes = plt.subplots(6, 6, figsize = (15, 15)) samples = np.array([ [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35] ]) for row in range(0, 6): for col in range(0, 6): current_image = train_images[samples[row, col]] axes[row, col].set_title(train_labels[samples[row, col]]) axes[row, col].axis('off') axes[row, col].imshow(current_image) train_images = train_images.reshape((-1, 28 * 28)) test_images = test_images.reshape((-1, 28 * 28)) images = np.vstack((train_images, test_images)) plt.show()<jupyter_output><empty_output><jupyter_text>## Training<jupyter_code>fit_params = { 'epochs': 15, 'batch_size': 64 } history = vae_model.fit(images, images, **fit_params)<jupyter_output>Train on 70000 samples Epoch 1/15 70000/70000 [==============================] - 26s 369us/sample - loss: 0.1639 Epoch 2/15 70000/70000 [==============================] - 25s 353us/sample - loss: 0.1425 Epoch 3/15 70000/70000 [==============================] - 44s 629us/sample - loss: 0.1389 Epoch 4/15 70000/70000 [==============================] - 38s 542us/sample - loss: 0.1369 Epoch 5/15 70000/70000 [==============================] - 34s 485us/sample - loss: 0.1354 Epoch 6/15 70000/70000 [==============================] - 32s 455us/sample - loss: 0.1342 Epoch 7/15 70000/70000 [==============================] - 36s 511us/sample - loss: 0.1334 Epoch 8/15 70000/70000 [==============================] - 31s 445us/sample - loss: 0.1327 Epoch 9/15 70000/70000 [==============================] - 26s 369us/sample - loss: 0.1322 Epoch 10/15 70000/70000 [==============================] - 25s 362us/sample - loss: 0.1316 Epoch 11/15 70000/70000 [==============================] - 30s 422us/sample [...]<jupyter_text>## Results<jupyter_code>input_codings = np.random.normal(size = [60, num_units_hidden_3]) generated_images = decoder.predict(input_codings) fig, axes = plt.subplots(6, 6, figsize = (15, 15)) i = 0 for row in range(0, 6): for col in range(0, 6): current_image = np.array(generated_images[i]).reshape(28, 28) axes[row, col].axis('off') axes[row, col].imshow(current_image) i += 1 plt.show()<jupyter_output><empty_output>
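A note on the `latent_loss` defined above: for a diagonal Gaussian q = N(mean, exp(gamma)), where `gamma` is the log-variance, the KL divergence to the standard normal prior is 0.5 * Σ(exp(gamma) + mean² - 1 - gamma), which is exactly the -0.5 * Σ(1 + gamma - exp(gamma) - mean²) expression used in the code. A quick self-contained NumPy sanity check of that identity (random values, nothing from the notebook):

```python
import numpy as np

# gamma plays the role of log(sigma^2), as in the notebook's Sampling layer.
rng = np.random.default_rng(0)
mu = rng.normal(size=20)
gamma = rng.normal(size=20)
sigma2 = np.exp(gamma)

# Textbook form of KL( N(mu, diag(sigma2)) || N(0, I) ).
kl_textbook = 0.5 * np.sum(sigma2 + mu**2 - 1.0 - np.log(sigma2))
# Form used in the notebook's latent_loss.
kl_notebook = -0.5 * np.sum(1.0 + gamma - np.exp(gamma) - np.square(mu))

print(kl_textbook, kl_notebook)
assert np.isclose(kl_textbook, kl_notebook)  # identical up to floating point
```

The division by 784 in `vae_model.add_loss(K.mean(latent_loss) / 784.)` puts the KL term on the same per-pixel scale as the binary cross-entropy reconstruction loss.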
no_license
/Generating Digits/Generating Digits using a Variational Autoencoder/Generating Digits using an Variational Autoencoder.ipynb
xnell90/Data-Science-Portfolio
9
<jupyter_start><jupyter_text># BERT for QQP - Fine-tune BERT on Quora question pairs There are two ways to use BERT for QQP. One way is to use the pretrained BERT only to embed the sentences: the embedded vectors become features for a downstream NN model, and the remaining steps are straightforward (feed the features into a neural network and classify the question pairs). There are existing implementations of this approach, such as `SBERT` (Sentence-BERT, the `sentence-transformers` package), and packages like `bert-as-service` make it easy to use the pretrained model this way. The other way is to fine-tune the BERT model on the downstream task, which may achieve better performance since the pretrained model is trained further on the downstream training data. Here we implement the second idea and fine-tune BERT on the QQP problem. The model contains two parts: - The first part is the **pretrained BERT model** (`bert-base-uncased`), which embeds the input ([input_ids, attention_masks, token_type_ids]). - The second part is **a neural network head** consisting of {BiLSTM - pooling - dense - sigmoid} with dropout. The notebook proceeds step by step: - prepare the raw data (load, preprocess, ...) - create a data generator that produces training and test batches for the model - set bert_model.trainable=False and train the model (train only the layers in the head) - set bert_model.trainable=True and train the model again (fine-tune BERT) - make predictions on the test set - build the submission for the QQP problem The model scores **0.27835**. Although this is not better than our featured Siamese-LSTM model (because this model is not yet mature), several improvements are planned: - add more features (feature_nlp.csv, feature_tm.csv, etc.) from `feature_engineering_train.ipynb` as a supplement - adjust the neural network head (layers that perform 'feature extraction') - tune hyperparameters and train for more epochs (we used only 1 training epoch to get the scores due to time and resource limits) - ... **NOTES:** - This notebook runs in the conda env tensorflow_env (TensorFlow 2.3.1) on my MacBook 16. - It follows the Keras official examples and the Transformers documentation to implement BERT fine-tuning for the QQP problem. - https://huggingface.co/transformers/model_doc/bert.html - https://keras.io/examples/nlp/semantic_similarity_with_bert/ - https://keras.io/examples/nlp/masked_language_modeling/ - https://www.cnblogs.com/dogecheng/p/11617940.html - SBERT: https://www.sbert.net/docs/training/overview.html?highlight=get_word_embedding_dimension - BERT as a service: https://github.com/hanxiao/bert-as-service#building-a-qa-semantic-search-engine-in-3-minutes<jupyter_code># Install transformers !pip install transformers import numpy as np import pandas as pd import tensorflow as tf import transformers # tf version 2.3.1 has the swish activation function # Hyperparameters max_length = 128 # Maximum length of input sentence to the model. batch_size = 32 epochs = 2 from sklearn.model_selection import train_test_split # Load the full Quora training set; 10% is held out for validation.
train_df = pd.read_csv("Data/train.csv") train_df, valid_df = train_test_split(train_df, test_size=0.1, random_state=42) test_df = pd.read_csv('Data/test.csv') # Shape of the data print(f"Total train samples : {train_df.shape}") print(f"Total validation samples: {valid_df.shape}") print(f"Total test samples: {test_df.shape}") print(type(train_df)) print(type(test_df)) print(type(valid_df)) # Drop Nan values on dataset print("Number of missing values") train_df = train_df.fillna(' ') test_df = test_df.fillna(' ') valid_df = valid_df.fillna(' ') print(train_df.isnull().sum()) train_df.head() test_df.head() # Split data and labels train_df["label"] = train_df["is_duplicate"] #y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=2) y_train = train_df.label.values valid_df["label"] = valid_df["is_duplicate"] y_val = valid_df.label.values # Data generator: change the raw train, validation and test data into # !! the batch_size is better dividable by the numbers of trainning data and test data class BertSemanticDataGenerator(tf.keras.utils.Sequence): ''' Generates batches of data. Parameters: sentence_pairs: Array of premise and hypothesis input sentences. labels: Array of labels. batch_size: Integer batch size. shuffle: boolean, whether to shuffle the data. include_targets: boolean, whether to incude the labels. Returns: Tuples `([input_ids, attention_mask, `token_type_ids], labels)` (or just `[input_ids, attention_mask, `token_type_ids]` if `include_targets=False`) ''' def __init__( self, sentence_pairs, labels, batch_size=batch_size, shuffle=True, include_targets=True, ): self.sentence_pairs = sentence_pairs self.labels = labels self.shuffle = shuffle self.batch_size = batch_size self.include_targets = include_targets # Load our BERT Tokenizer to encode the text. # We will use base-base-uncased pretrained model. self.tokenizer = transformers.BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) self.indexes = np.arange(len(self.sentence_pairs)) self.on_epoch_end() def __len__(self): # The number of batches per epoch. return len(self.sentence_pairs) // self.batch_size def __getitem__(self, idx): # Retrieves the batch of index. indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size] sentence_pairs = self.sentence_pairs[indexes] # With BERT tokenizer's batch_encode_plus batch of both the sentences are # encoded together and separated by [SEP] token. encoded = self.tokenizer.batch_encode_plus( sentence_pairs.tolist(), add_special_tokens=True, max_length=max_length, return_attention_mask=True, return_token_type_ids=True, pad_to_max_length=True, return_tensors="tf", ) # Convert batch of encoded features to numpy array. input_ids = np.array(encoded["input_ids"], dtype="int32") attention_masks = np.array(encoded["attention_mask"], dtype="int32") token_type_ids = np.array(encoded["token_type_ids"], dtype="int32") # Set to true if data generator is used for training/validation. if self.include_targets: labels = np.array(self.labels[indexes], dtype="int32") return [input_ids, attention_masks, token_type_ids], labels else: return [input_ids, attention_masks, token_type_ids] def on_epoch_end(self): # Shuffle indexes after each epoch if shuffle is set to True. if self.shuffle: np.random.RandomState(42).shuffle(self.indexes) !pip install ipywidgets !jupyter nbextension enable --py widgetsnbextension # Create the model under a distribution strategy scope. 
strategy = tf.distribute.MirroredStrategy() with strategy.scope(): # Encoded token ids from BERT tokenizer. input_ids = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="input_ids" ) # Attention masks indicate to the model which tokens should be attended to. attention_masks = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="attention_masks" ) # Token type ids are binary masks identifying different sequences in the model. token_type_ids = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="token_type_ids" ) # Loading pretrained BERT model. bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased") # Freeze the BERT model to reuse the pretrained features without modifying them. bert_model.trainable = False sequence_output, pooled_output = bert_model( input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids ) # Add trainable layers on top of frozen layers to adapt the pretrained features to the new data. bi_lstm = tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(64, return_sequences=True) )(sequence_output) # Applying hybrid pooling approach to bi_lstm sequence output. avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm) max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm) concat = tf.keras.layers.concatenate([avg_pool, max_pool]) dropout = tf.keras.layers.Dropout(0.3)(concat) output = tf.keras.layers.Dense(1, activation="sigmoid")(dropout) model = tf.keras.models.Model( inputs=[input_ids, attention_masks, token_type_ids], outputs=output ) model.compile( optimizer=tf.keras.optimizers.Adam(), loss="binary_crossentropy", metrics=["acc"], ) print(f"Strategy: {strategy}") model.summary() train_data = BertSemanticDataGenerator( train_df[["question1", "question2"]].values.astype("str"), y_train, batch_size=batch_size, shuffle=True, ) valid_data = BertSemanticDataGenerator( valid_df[["question1", "question2"]].values.astype("str"), y_val, batch_size=batch_size, shuffle=False, ) history = model.fit( train_data, validation_data=valid_data, epochs=epochs, use_multiprocessing=True, workers=-1, ) # Change epochs to 1 for the fine-tuning phase epochs = 1 # Unfreeze the bert_model. bert_model.trainable = True # Recompile the model to make the change effective. model.compile( optimizer=tf.keras.optimizers.Adam(1e-5), loss="binary_crossentropy", metrics=["accuracy"], ) model.summary() history = model.fit( train_data, validation_data=valid_data, epochs=epochs, use_multiprocessing=True, workers=-1, ) test_data = BertSemanticDataGenerator( test_df[["question1", "question2"]].values.astype("str"), labels=None, batch_size=36, shuffle=False, include_targets=False ) # the test set has 2345796 rows, so the generator batch_size should divide it evenly (36 does) pred_probs = model.predict( test_data, batch_size=1024, verbose=1, workers=-1 ) print('Making the submission') test_ids = test_df.test_id.values submission = pd.DataFrame({'test_id':test_ids, 'is_duplicate':pred_probs.ravel()}) submission.to_csv('Models/bert_1.csv', index=False)<jupyter_output>Making the submission
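To see exactly what `BertSemanticDataGenerator` feeds the model, the sketch below encodes a single question pair the same way the generator does. The two questions are invented for illustration, and the snippet assumes the `bert-base-uncased` tokenizer files can be downloaded.

```python
import numpy as np
import transformers

tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)

# One made-up question pair, encoded as [CLS] question1 [SEP] question2 [SEP].
pair = [["How do I learn Python quickly?", "What is the fastest way to learn Python?"]]
encoded = tokenizer.batch_encode_plus(
    pair,
    add_special_tokens=True,
    max_length=128,
    return_attention_mask=True,
    return_token_type_ids=True,
    pad_to_max_length=True,
    return_tensors="tf",
)

input_ids = np.array(encoded["input_ids"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
print(input_ids.shape)          # (1, 128): one pair, padded to max_length
print(token_type_ids[0, :20])   # 0s over question1 tokens, 1s over question2 tokens
print(tokenizer.decode(input_ids[0, :20].tolist()))
```

The `token_type_ids` are what let BERT distinguish the two questions inside a single packed sequence, which is why the generator returns them alongside the input ids and attention masks.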
no_license
/bert.ipynb
champagnesupernova14/Duplicate-Question-Pair-Identification
1
<jupyter_start><jupyter_text><jupyter_code>import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.cluster import KMeans from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix, precision_score, accuracy_score, recall_score, f1_score, roc_auc_score, roc_curve from xgboost import XGBClassifier from mlxtend.classifier import EnsembleVoteClassifier from sklearn.preprocessing import StandardScaler def performance_of_models(x, y, list_of_models): # Summarise every fitted model on the same evaluation data. results = [model_summary(model, x, y) for model in list_of_models] results = pd.DataFrame(results) return results def model_summary(model, x, y): # Evaluate one fitted model on (x, y) and collect the usual classification metrics. y_pred = model.predict(x) cnf_matrix = confusion_matrix(y, y_pred) precision = precision_score(y, y_pred) accuracy = accuracy_score(y, y_pred) recall = recall_score(y, y_pred) f1 = f1_score(y, y_pred) auc = roc_auc_score(y, y_pred) fpr, tpr, threshold = roc_curve(y, y_pred) result = {'model': str(model), 'confusion matrix': cnf_matrix, 'precision': precision, 'accuracy': accuracy, 'recall': recall, 'f1 score': f1, 'auc': auc, 'fpr': fpr, 'tpr': tpr, 'threshold': threshold, } return result def Log_reg(X,y): # Grid search over regularisation type and strength; liblinear supports both l1 and l2. log_reg = LogisticRegression(solver='liblinear') penalty = ['l1','l2'] C = np.logspace(0,4,20) hyperpara = dict(penalty=penalty, C=C) gridsearch = GridSearchCV(log_reg, hyperpara) return gridsearch.fit(X,y) def Rand_forest(X,y): # Grid search over number of trees and split criterion. RF = RandomForestClassifier(n_estimators=150) n_estimators = np.arange(1,20,1) criterion = ['gini','entropy'] hyperpara = dict(n_estimators=n_estimators, criterion=criterion) gridsearch = GridSearchCV(RF, hyperpara) return gridsearch.fit(X,y) def KNeig(X,y): # Grid search over neighbourhood size, weighting and search algorithm. Kneig = KNeighborsClassifier(n_neighbors=5) n_neighbors = np.arange(1,20,1) weights = ['uniform','distance'] algorithm = ['auto', 'ball_tree', 'kd_tree', 'brute'] hyperpara = dict(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) gridsearch = GridSearchCV(Kneig, hyperpara) return gridsearch.fit(X,y) def XGBoost_(X,y): # Grid search over boosting rounds, verbosity and tree depth. xgb = XGBClassifier() n_estimators = np.arange(1,20,1) silent = [True,False] max_depth = np.arange(1,20,1) hyperpara = dict(n_estimators=n_estimators, silent=silent, max_depth=max_depth) return GridSearchCV(xgb, hyperpara).fit(X,y) def SVC_(X,y): # Standardise the features, then grid search over C and kernel ('precomputed' needs a kernel matrix, so it is excluded). Sscaler = StandardScaler() X = Sscaler.fit_transform(X) svc = SVC(kernel='rbf', random_state=0, gamma=1, C=1) C = np.arange(1,10,0.5) kernel = ['linear', 'poly', 'rbf', 'sigmoid'] hyperpara = dict(C=C, kernel=kernel) return GridSearchCV(svc, hyperpara).fit(X,y)<jupyter_output><empty_output>
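The helpers above are defined but never exercised in this notebook. A minimal usage sketch on a synthetic dataset (variable names here are illustrative, and only the quicker grid searches are included) could look like this:

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Synthetic stand-in data, just to exercise the helpers defined above.
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Each helper returns a fitted GridSearchCV object.
fitted_models = [
    Log_reg(x_train, y_train),
    Rand_forest(x_train, y_train),
    KNeig(x_train, y_train),
]

# One row of metrics per model, evaluated on the held-out test split.
summary = performance_of_models(x_test, y_test, fitted_models)
print(summary[["model", "accuracy", "precision", "recall", "f1 score", "auc"]])
```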
no_license
/Models.ipynb
tomasz-soltysiak/Fraud_transaction
1
<jupyter_start><jupyter_text># Nb-20180311-1212-Price-NormalizedPlotFrom previous notebook, wondering what plots look like if normalized to Adj Close...So, get list of mega, large, and mid-cap companies on NYSE for looking for 1% price gain prediction using machine learning. - https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NYSE&marketcap=Mega-cap - https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NYSE&marketcap=Large-cap - https://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NYSE&marketcap=Mid-cap<jupyter_code># Put these at the top of every notebook, to get automatic reloading and inline plotting %reload_ext autoreload %autoreload 2 %matplotlib inline import datetime as dt import numpy as np import pandas as pd import matplotlib.colors as colors import matplotlib.dates as mdates import matplotlib.ticker as mticker import matplotlib.mlab as mlab import matplotlib.pyplot as plt import matplotlib.font_manager as font_manager import finance as fat # Change the plot size. plt.rcParams['figure.figsize'] = [18.0, 10.0]<jupyter_output><empty_output><jupyter_text>### Load Company Data<jupyter_code>c = pd.read_csv('data/NYSE-Companies-Mega-Large-Mid-Cap.csv') c.head() c = c.sort_values('MarketCap',ascending=False) c.head()<jupyter_output><empty_output><jupyter_text>### Create featuresThese are some ideas from a paper: _A Feature Fusion Based Forecasting Model for Financial Time Series_ by Zhiqiang Guo , Huaiqing Wang, Quan Liu, Jie Yang - http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0101113![PredictionParameters.png](attachment:PredictionParameters.png)We'll start of with a sub-set of these:### Create scalable featuresFirst, create features that can be linearly scaled (within a row).**Hypothesis**: having all price data scaled between 0.0-1.0 on a per row basis will improve ease of training and accuracy of predicitions.<jupyter_code>ticker = c.iloc[1]['Symbol'] data = fat.get_price_data(ticker) ohlcv = data # Just use 'Adj Close' data = pd.DataFrame(data, columns=['Adj Close']) data.head() # Gain data['Gain'] = data['Adj Close'].diff() data.head() # SMA: 6, 12, 20, 200 data = fat.add_sma_columns(data, 'Adj Close', [6,12]) data.tail() data = fat.add_ema_columns(data, 'Adj Close', [12,26]) data.tail() data = fat.add_bollinger_bands(data, 'Adj Close') data.tail() overlay = data.copy() del overlay['Gain'] fat.plot_daily_ticker(ohlcv['2018'], title=ticker, overlay=overlay) data['Return'] = data['Gain']/data['Adj Close'].shift(1) data.tail() data['IsBigGain'] = data['Return'] > 0.01 data['IsNextDayBigGain'] = data['IsBigGain'].shift(-1) data.tail()<jupyter_output><empty_output><jupyter_text>Let's scale and make sure things look similar...Try scaling rows...<jupyter_code>scaled_rows = data.div(data['Adj Close'], axis=0) del scaled_rows['Gain'] del scaled_rows['Return'] del scaled_rows['IsBigGain'] scaled_rows['IsNextDayBigGain'] = ((scaled_rows['IsNextDayBigGain'] > 0) * .1) + 0.8 scaled_rows.tail() scaled_rows['2018'].plot() columns=['Adj Close', 'Adj Close BBandHi20', 'Adj Close BBandLo20', 'IsNextDayBigGain'] simp_plot = pd.DataFrame(scaled_rows, columns=columns) simp_plot['2017':'2018'].plot() plot_data = pd.DataFrame(data, columns=columns) plot_data['2016':'2018'].plot() columns=['Adj Close', 'Adj Close BBandHi20'] simp_plot = pd.DataFrame(scaled_rows, columns=columns) simp_plot['2016':'2018'].plot()<jupyter_output><empty_output>
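The `fat` helpers used above are custom to this repository. For readers without that module, roughly equivalent columns can be built with plain pandas; the sketch below assumes the helpers are thin wrappers over rolling/EWM statistics (the column names are chosen to mirror the ones in the notebook, and the price series is synthetic):

```python
import numpy as np
import pandas as pd

# Synthetic price series standing in for 'Adj Close' (the notebook loads real data via fat.get_price_data).
idx = pd.date_range("2016-01-01", periods=300, freq="B")
rng = np.random.default_rng(1)
data = pd.DataFrame({"Adj Close": 100 + np.cumsum(rng.normal(0, 1, len(idx)))}, index=idx)

# Simple and exponential moving averages.
for w in (6, 12):
    data[f"Adj Close SMA{w}"] = data["Adj Close"].rolling(w).mean()
for span in (12, 26):
    data[f"Adj Close EMA{span}"] = data["Adj Close"].ewm(span=span, adjust=False).mean()

# 20-day Bollinger bands: 20-day SMA plus/minus two rolling standard deviations.
sma20 = data["Adj Close"].rolling(20).mean()
std20 = data["Adj Close"].rolling(20).std()
data["Adj Close BBandHi20"] = sma20 + 2 * std20
data["Adj Close BBandLo20"] = sma20 - 2 * std20

# The per-row scaling tested above: divide every price column by that row's Adj Close,
# so all price-like features land near 1.0 regardless of the stock's absolute price.
scaled_rows = data.div(data["Adj Close"], axis=0)
print(scaled_rows.tail())
```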
no_license
/Notebooks/Nb-20180311-1212-Price-NormalizedPlot.ipynb
webclinic017/finance-16
4
<jupyter_start><jupyter_text># Evaluation Metrics for Classification Problems<jupyter_code>from sklearn import datasets import numpy as np digits = datasets.load_digits() X = digits.data y = digits.target.copy() y[digits.target == 9] = 1 y[digits.target != 9] = 0 # Split into training and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666) # Train a logistic regression model from sklearn.linear_model import LogisticRegression log_reg = LogisticRegression() log_reg.fit(X_train, y_train) log_reg.score(X_test, y_test) y_log_predict = log_reg.predict(X_test)<jupyter_output><empty_output><jupyter_text>## Confusion Matrix<jupyter_code>def TN(y_true, y_predict): assert len(y_true) == len(y_predict) return np.sum((y_true==0) & (y_predict==0)) TN(y_test, y_log_predict) def FP(y_true, y_predict): assert len(y_true) == len(y_predict) return np.sum((y_true==0) & (y_predict==1)) FP(y_test, y_log_predict) def FN(y_true, y_predict): assert len(y_true) == len(y_predict) return np.sum((y_true==1) & (y_predict==0)) FN(y_test, y_log_predict) def TP(y_true, y_predict): assert len(y_true) == len(y_predict) return np.sum((y_true==1) & (y_predict==1)) TP(y_test, y_log_predict) def confusion_matrix(y_true, y_predict): return np.array([ [TN(y_true, y_predict), FP(y_true, y_predict)], [FN(y_true, y_predict), TP(y_true, y_predict)] ]) confusion_matrix(y_test, y_log_predict) # Precision def precision_score(y_true, y_predict): tp = TP(y_true, y_predict) fp = FP(y_true, y_predict) try: return tp / (tp + fp) except: return 0.0 precision_score(y_test, y_log_predict) # Recall def recall_score(y_true, y_predict): tp = TP(y_true, y_predict) fn = FN(y_true, y_predict) try: return tp / (tp + fn) except: return 0.0 recall_score(y_test, y_log_predict)<jupyter_output><empty_output><jupyter_text># Confusion matrix, precision, and recall in scikit-learn<jupyter_code>from sklearn.metrics import confusion_matrix, precision_score, recall_score confusion_matrix(y_test, y_log_predict) precision_score(y_test, y_log_predict) recall_score(y_test, y_log_predict)<jupyter_output><empty_output>
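Precision and recall are usually combined into the F1 score, their harmonic mean. The short follow-on below reuses the `y_test` and `y_log_predict` arrays defined above and checks a hand-written F1 against scikit-learn's:

```python
from sklearn.metrics import f1_score, classification_report

def f1(precision, recall):
    # Harmonic mean of precision and recall; 0 when both are 0.
    try:
        return 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        return 0.0

p = precision_score(y_test, y_log_predict)
r = recall_score(y_test, y_log_predict)
print(f1(p, r))
print(f1_score(y_test, y_log_predict))            # should match the value above
print(classification_report(y_test, y_log_predict))
```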
no_license
/python_in_DM/mooc_learn/evaluation_in_classfication.ipynb
tyfloving/time_series_predict
3
<jupyter_start><jupyter_text> Licence CC BY-NC-ND Thierry Parmentelat <jupyter_code>from plan import plan_extras; plan_extras("notebooks")<jupyter_output><empty_output>
no_license
/notebooks/03-1-notebooks-basic.ipynb
chagaz/primer
1
<jupyter_start><jupyter_text>The code in this notebook was mainly modified from https://github.com/pytorch/examples/blob/master/mnist/main.py<jupyter_code>from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4*4*50, 500) self.fc2 = nn.Linear(500, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4*4*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) def train(model, device, train_loader, optimizer, epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) def test(model, device, test_loader): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. 
* correct / len(test_loader.dataset))) batch_size = 64 test_batch_size = 1000 epochs = 10 lr = 0.01 momentum = 0.5 log_interval = 10 # set CUDA computing device = torch.device("cuda" if torch.cuda.is_available() else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {} # load data train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=test_batch_size, shuffle=True, **kwargs) model = Net().to(device) # use multi-GPUs model = nn.DataParallel(model) optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) for epoch in range(1, epochs + 1): train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader) torch.save(model.state_dict(),"mnist_cnn.pt")<jupyter_output>Train Epoch: 1 [0/60000 (0%)] Loss: 2.337183 Train Epoch: 1 [640/60000 (1%)] Loss: 2.242791 Train Epoch: 1 [1280/60000 (2%)] Loss: 2.173599 Train Epoch: 1 [1920/60000 (3%)] Loss: 2.028429 Train Epoch: 1 [2560/60000 (4%)] Loss: 1.842776 Train Epoch: 1 [3200/60000 (5%)] Loss: 1.373316 Train Epoch: 1 [3840/60000 (6%)] Loss: 0.980961 Train Epoch: 1 [4480/60000 (7%)] Loss: 0.826316 Train Epoch: 1 [5120/60000 (9%)] Loss: 0.703372 Train Epoch: 1 [5760/60000 (10%)] Loss: 0.644808 Train Epoch: 1 [6400/60000 (11%)] Loss: 0.453746 Train Epoch: 1 [7040/60000 (12%)] Loss: 0.636141 Train Epoch: 1 [7680/60000 (13%)] Loss: 0.305338 Train Epoch: 1 [8320/60000 (14%)] Loss: 0.356823 Train Epoch: 1 [8960/60000 (15%)] Loss: 0.243398 Train Epoch: 1 [9600/60000 (16%)] Loss: 0.378249 Train Epoch: 1 [10240/60000 (17%)] Loss: 0.347061 Train Epoch: 1 [10880/60000 (18%)] Loss: 0.290357 Train Epoch: 1 [11520/60000 (19%)] Loss: 0.312700 Train Epoch: 1 [12160/60000 (20%)] Loss: 0.331665 Train Epoch: 1 [12800/60000 ([...]
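One detail worth spelling out: because the model was wrapped in `nn.DataParallel` before saving, every key in `mnist_cnn.pt` carries a `module.` prefix. A sketch of loading that checkpoint back into a bare `Net` for CPU inference (it reuses the `Net` class and `test_loader` defined above):

```python
import torch

# Load the checkpoint saved at the end of training; map to CPU so no GPU is required.
state_dict = torch.load("mnist_cnn.pt", map_location="cpu")

# nn.DataParallel prefixes parameter names with "module."; strip it for a plain Net.
state_dict = {k.replace("module.", "", 1): v for k, v in state_dict.items()}

model = Net()
model.load_state_dict(state_dict)
model.eval()

# Classify one batch from the test loader and report its accuracy.
with torch.no_grad():
    data, target = next(iter(test_loader))
    pred = model(data).argmax(dim=1)
    print((pred == target).float().mean().item())
```

Alternatively, wrapping the fresh model in `nn.DataParallel` before calling `load_state_dict` avoids the renaming step.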
no_license
/mnist_example.ipynb
mcps5601/Pytorch-practice
1
<jupyter_start><jupyter_text>Класс моделей ARIMA недостаточно богат для наших данных: с их помощью, например, никак нельзя учесть взаимосвязи между рядами. Это можно сделать с помощью векторной авторегрессии VARIMA, но её питоновская реализация не позволяет использовать регрессионные признаки. Кроме того, авторегрессионный подход не позволяет учитывать, например, взаимодействия между сезонными компонентами. Вы могли заметить, что форма суточных сезонных профилей в будни и выходные немного разная; явно моделировать этот эффект с помощью ARIMA не получится. Нам нужна более сложная модель. Давайте займёмся сведением задачи массового прогнозирования рядов к регрессионной постановке! Вам понадобится много признаков. Некоторые из них у вас уже есть — это: идентификатор географической зоны дата и время количество поездок в периоды, предшествующие прогнозируемому синусы, косинусы и тренды, которые вы использовали внутри регрессионной компоненты ARIMA Кроме того, не спешите выбрасывать построенный вами на прошлой неделе прогнозы — из них может получиться хороший признак для регрессии! Вы можете попробовать разные регрессионный модели, но хорошие результаты, скорее всего, дадут такие, которые будут позволять признакам взаимодействовать друг с другом. Поскольку прогноз нужен на 6 часов вперёд, проще всего будет построить 6 независимых регрессионных моделей — одна для прогнозирования y^T+1|T, другая для y^T+2|T и т.д. Чтобы сдать задание, выполните следующую последовательность действий. Для каждой из шести задач прогнозирования y^T+i|T,i=1,…,6 сформируйте выборки. Откликом будет yT+i при всевозможных значениях T, а признаки можно использовать следующие: идентификатор географической зоны — категориальный год, месяц, день месяца, день недели, час — эти признаки можно пробовать брать и категориальными, и непрерывными, можно даже и так, и так (done) синусы, косинусы и тренды, которые вы использовали внутри регрессионной компоненты ARIMA (done) сами значения прогнозов ARIMA y^T+i|TARIMA количество поездок из рассматриваемого района в моменты времени yT,yT−1,…,yT−K (параметр K можно подбирать; попробуйте начать, например, с 6) количество поездок из рассматриваемого района в моменты времени yT−24,yT−48,…,yT−24∗Kd (параметр Kd можно подбирать; попробуйте начать, например, с 2) суммарное количество поездок из рассматриваемого района за предшествующие полдня, сутки, неделю, месяц Будьте внимательны при создании признаков — все факторы должны быть рассчитаны без использования информации из будущего: при прогнозировании y^T+i|T,i=1,…,6 вы можете учитывать только значения y до момента времени T включительно. Выбранными моделями постройте для каждой географической зоны и каждого конца истории от 2016.04.30 23:00 до 2016.05.31 17:00 прогнозы на 6 часов вперёд; посчитайте в ноутбуке ошибку прогноза по следующему функционалу: Qmay=1R∗739∗6∑r=1R∑T=2016.04.3023:002016.05.3117:00∑i=16y^T|T+ir−yT+ir. Убедитесь, что ошибка полученных прогнозов, рассчитанная согласно функционалу Q, определённому на прошлой неделе, уменьшилась по сравнению с той, которую вы получили методом индивидуального применения моделей ARIMA. Если этого не произошло, попробуйте улучшить ваши модели. Итоговыми моделями постройте прогнозы для каждого конца истории от 2016.05.31 23:00 до 2016.06.30 17:00 и запишите все результаты в один файл в формате geoID, histEndDay, histEndHour, step, y. 
Здесь geoID — идентификатор зоны, histEndDay — день конца истории в формате id,y, где столбец id состоит из склеенных через подчёркивание идентификатора географической зоны, даты конца истории, часа конца истории и номера отсчёта, на который делается предсказание (1-6); столбец y — ваш прогноз. Загрузите полученный файл на kaggle: https://inclass.kaggle.com/c/yellowtaxi. Добавьте в ноутбук ссылку на сабмишн. Загрузите ноутбук в форму.Подгружаем данные<jupyter_code># id нужных регионов regsDf = pd.read_csv('../crowdRegs.csv',names=['id','regId']); # времянные ряды для этих регионов df = pd.read_pickle('../loadData/crowdRegs3.pcl') regNames = regsDf.regId.values.astype('str') df.columns = regNames<jupyter_output><empty_output><jupyter_text>Наверное, оптимальный способ - пройтись по всем регионам, сформировать требуемую выборку, а потом - состыковать. Вероятно, в процессе работы получится векторизовать это действие. Пожалуй, имеет смысл сначала для всего фрейма добавить общие для всех колонок признаки (тренд, гармоники, даты, дни недели)<jupyter_code>def processDataFrame(inpDf, Kw = 7, Ka = 2): """ Обрабатываем сразу весь dateFrame и добавляем признаки, общие для всех рядов тренд, гармоники, категориальные перемнные для дат, дней недели, etc) Parameters: Kw number of weeks harmonics Ka number of annual harmonics """ inpDf = inpDf.assign(linear = (inpDf.index - datetime.datetime(2014,1,1,0,0,0))/np.timedelta64(1, 'h')) # час — эти признаки можно пробовать брать и категориальными # и непрерывными, можно даже и так, и так # добавляем гармонические фичи for ind in range(1,Kw+1): inpDf['weekCos'+str(ind)]= np.cos(np.pi*inpDf.linear*ind/168) inpDf['weekSin'+str(ind)]= np.sin(np.pi*inpDf.linear*ind/168) for ind in range(1,Ka+1): inpDf['yearCos'+str(ind)]= np.cos(2*np.pi*inpDf.linear*ind/8766) inpDf['yearSin'+str(ind)]= np.sin(2*np.pi*inpDf.linear*ind/8766) # добавляем числовое и категориальные свойства для дней недели inpDf = inpDf.assign(dayOfWeek = inpDf.index.dayofweek) lbDays = preprocessing.LabelBinarizer() lbDays.fit(list(np.arange(6))) DoW = pd.DataFrame(lbDays.transform(inpDf.index.dayofweek),columns = ['dayOfWeek_'+str(x) for x in np.arange(6)], index = inpDf.index) inpDf = inpDf.merge(DoW,left_index=True,right_index=True) # добавляем dummy variables для месяца inpDf = inpDf.assign(month = inpDf.index.month) lbMonths = preprocessing.LabelBinarizer() lbMonths.fit(list(np.arange(12))) Months = pd.DataFrame(lbMonths.transform(inpDf.index.month),columns = ['month_'+str(x) for x in np.arange(1,13)], index = inpDf.index) inpDf = inpDf.merge(Months,left_index=True,right_index=True); # добавляем год (вещественный) inpDf = inpDf.assign(year = inpDf.index.year) # добавляем день месяца (вещественный) inpDf = inpDf.assign(day = inpDf.index.day) # добавляем час (вещественный и категориальный) inpDf = inpDf.assign(hour = inpDf.index.hour) lbHours = preprocessing.LabelBinarizer() lbHours.fit(list(np.arange(24))) Hours = pd.DataFrame(lbHours.transform(inpDf.index.hour),columns = ['hour_'+str(x) for x in np.arange(24)], index = inpDf.index) inpDf = inpDf.merge(Hours,left_index=True,right_index=True) return inpDf<jupyter_output><empty_output><jupyter_text>Теперь делаем индивидуальную обработку для каждого региона добавляем идентификатор географической зоны — категориальный количество поездок из рассматриваемого района в моменты времени yT,yT−1,…,yT−K (параметр K можно подбирать; попробуйте начать, например, с 6) количество поездок из рассматриваемого района в моменты времени yT−24,yT−48,…,yT−24∗Kd (параметр 
Kd можно подбирать; попробуйте начать, например, с 2) суммарное количество поездок из рассматриваемого района за предшествующие полдня, сутки, неделю, месяц 2) <jupyter_code>def processSeries(df,tReg,Kh = 6, Kp = 2): """ Обработка одного данного ряда parameters: df - начальный датафрейм, из которого выберем для обработки один ряд tReg - название ряда, который надо обработать Kh - количество отслеживаемых прошлых суточных лагов "назад" Kp - количество отслеживаемых прошлых периодических лагов (период 24 часа) """ tDf = df.loc[:,tReg.split() + commonFeatures].rename(columns={tReg:'y'}) tDf = tDf.assign(region = tReg) for timeLag in np.arange(1,Kh+1): name = 'hourLag_'+str(timeLag) tDf.loc[:,name] = tDf.y.shift(periods=timeLag) for timeLag in np.arange(1,Kp+1): name = 'periodicLag_'+str(timeLag) tDf.loc[:,name] = tDf.y.shift(periods=timeLag*24) tDf.fillna(0,inplace=True) # суммарное количество поездок из рассматриваемого района за предшествующие полдня, сутки, неделю, месяц tDf.loc[:,'sum12'] = tDf.y.rolling(window = 12, min_periods = 1).sum() tDf.loc[:,'sum24'] = tDf.y.rolling(window = 24, min_periods = 1).sum() tDf.loc[:,'sumWeek'] = tDf.y.rolling(window = 168, min_periods = 1).sum() tDf.loc[:,'sumMonth'] = tDf.y.rolling(window = 720, min_periods = 1).sum() #создаём шесть целевые переменных для каждого конца истории for targetVar in np.arange(1,7): name = 'y'+str(targetVar) tDf.loc[:,name] = tDf.y.shift(-targetVar) tDf.fillna(0,inplace=True) return tDf def saveResults(rdf, fName): rnd = np.round f = open(fName,'w') f.writelines('id,y\n') for ind, row in rdf.iterrows(): historyStart = row.date - datetime.timedelta(hours = 1) if historyStart > datetime.datetime(2016,6,30,17): continue s0 = str(row.region)+'_'+ str(datetime.datetime.strftime(historyStart, "%Y-%m-%d"))+ '_'+ str(historyStart.hour) s1 = s0 +'_1,'+str(rnd(row.get('y1'))) + '\n' f.writelines(s1) s2 = s0 +'_2,'+str(rnd(row.get('y2'))) + '\n' f.writelines(s2) s3 = s0 +'_3,'+str(rnd(row.get('y3'))) + '\n' f.writelines(s3) s4 = s0 +'_4,'+str(rnd(row.get('y4'))) + '\n' f.writelines(s4) s5 = s0 +'_5,'+str(rnd(row.get('y5'))) + '\n' f.writelines(s5) s6 = s0 +'_6,'+str(rnd(row.get('y6'))) + '\n' f.writelines(s6) f.close() # общая обработка данных df2 = processDataFrame(df,Kw = 7, Ka = 2) commonFeatures = list(set(df2.columns)-set(df.columns.values)) # обработка отдельных рядов df3 = pd.DataFrame() for regName in regNames: df3 = pd.concat([df3, processSeries(df2,regName,Kh = 12, Kp = 4)]) #regDf = df3.get('region') #df3 = pd.get_dummies(df3,'region') #df3 = df3.assign(region = regDf) df3.head() startTrain = '2014-01-01 00:00:00' endTrain = '2016-04-30 23:00:00' startValidation = '2016-05-01 00:00:00' endValidation = '2016-05-31 23:00:00' startTest = '2016-06-01 00:00:00' endTest = '2016-06-30 23:00:00' df3 = df3.loc[startTrain:endTest,:] targetList = ['y1','y2','y3','y4','y5','y6'] tsGroups = df3.groupby('region') dlCols = ['y','y1','y2','y3','y4','y5','y6'] df3.head() df4 = pd.DataFrame() for regId in tsGroups.groups.keys(): print ' ' print regId ts = tsGroups.get_group(regId) for target in targetList: linReg = linear_model.Ridge(alpha=0.1) linReg.fit(ts.loc[startTrain:endValidation,:].drop(dlCols,axis = 1),ts.loc[startTrain:endValidation,target]) prediction = linReg.predict(ts.drop(dlCols,axis = 1)) prediction[prediction<0] =0 print target, ' ',MAE(prediction,ts.get(target)) colName = target+'_reg' ts.loc[:,colName] = prediction # перезаписываю колонку? 
df4 = pd.concat([df4, ts]) df4 dropCols = ['index','y','y1','y2','y3','y4','y5','y6'] prediction = getTrips(testSet.drop(dropCols, axis = 1)) predictionDf = pd.DataFrame(prediction.T,columns=['y1','y2','y3','y4','y5','y6']) predictionDf.set_index(testSet.index,inplace=True) predictionDf = predictionDf.merge(regDf,left_index=True,right_index=True,how='left') predictionDf = predictionDf.round() diff = np.abs(predictionDf.y1-testSet.y1)+np.abs(predictionDf.y2-testSet.y2)+np.abs(predictionDf.y3-testSet.y3)+np.abs(predictionDf.y4-testSet.y4)+np.abs(predictionDf.y5-testSet.y5)+np.abs(predictionDf.y6-testSet.y6) print 'Error is', diff.mean()/6 # теперь надо сохранить это в файл fName = 'res_week5-4.csv' saveResults(predictionDf,fName)<jupyter_output><empty_output>
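The lag and rolling-sum features built in `processSeries` are the heart of this regression setup, so here is the same `shift`/`rolling` pattern on a small synthetic hourly series (column names mirror the notebook's; the data is invented):

```python
import numpy as np
import pandas as pd

# Synthetic hourly trip counts standing in for one region's series.
idx = pd.date_range("2016-05-01", periods=24 * 14, freq="H")
rng = np.random.default_rng(2)
ts = pd.DataFrame({"y": rng.poisson(50, len(idx))}, index=idx)

# Hourly lags y_{T-1} ... y_{T-6} and daily lags y_{T-24}, y_{T-48}.
for lag in range(1, 7):
    ts[f"hourLag_{lag}"] = ts["y"].shift(lag)
for lag in (1, 2):
    ts[f"periodicLag_{lag}"] = ts["y"].shift(24 * lag)

# Rolling totals over 12-, 24- and 168-hour windows (half day, day, week).
ts["sum12"] = ts["y"].rolling(12, min_periods=1).sum()
ts["sum24"] = ts["y"].rolling(24, min_periods=1).sum()
ts["sumWeek"] = ts["y"].rolling(168, min_periods=1).sum()

# Targets: the next 1..6 hours, one column per forecasting horizon.
for h in range(1, 7):
    ts[f"y{h}"] = ts["y"].shift(-h)

print(ts.dropna().head())
```

Because `shift` with a positive lag only looks backwards (and `shift(-h)` is used strictly as the target), none of the features leak information from after the end of the history, which is the requirement stated in the assignment text above.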
no_license
/week5/week 5-submit.ipynb
PhySci/projectNYtaxi
3
<jupyter_start><jupyter_text># Introduction A feature of modern programming languages is an extensive library of standard functions. This means that we can make use of standard, well-tested and optimised functions for performing common tasks rather than writing our own. This makes our programs shorter and of higher quality, and in most cases faster. ## Objectives - Introduce use of standard library functions - Importing and using modules - Introduction to namespaces - Print formatting of floats# The standard library You have already used some standard library types and functions. In previous activities we have used built-in types like `string` and `float`, and the function `abs` for absolute value. We have made use of the standard library function `print` to display to the screen. Python has a large standard library. To organise it, most functionality is arranged into 'modules', with each module providing a range of related functions. Before you program a function, check if there is a library function that can perform the task. The Python standard library is documented at https://docs.python.org/3/library/. Search engines are a good way to find library functions, e.g. entering "Is there a Python function to compute the hyperbolic tangent of a complex number" into a search engine will take you to the function `cmath.tanh`. Try this link: http://bfy.tw/7aMc.# Other libraries The standard library tools are general purpose and will be available in any Python environment. Specialised tools are usually made available in other libraries (modules). There is a huge range of Python libraries available for specialised problems. We have already used some parts of NumPy (http://www.numpy.org/), which is a specialised library for numerical computation. It provides much the same functionality as MATLAB. The simplest way to install a non-standard library is using the command `pip`. From the command line, the library NumPy is installed using: pip install numpy and from inside a Jupyter notebook use: !pip install numpy NumPy is so commonly used it is probably already installed on computers you will be using. You will see `pip` being used in some later notebooks to install special-purpose tools. When developing programs outside of learning exercises, if there is a no standard library module for a problem you are trying to solve, search online for a module before implementing your own.# Using library functions: `math` example To use a function from a module we need to make it available in our program. This is called 'importing'. We have done this in previous notebooks with the `math` module, but without explanation. The process is explained below. The `math` module (https://docs.python.org/3/library/math.html) provides a wide range of mathematical functions. For example, to compute the square root of a number, we do:<jupyter_code>import math x = 2.0 x = math.sqrt(x) print(x)<jupyter_output>1.4142135623730951 <jupyter_text>Dissecting the above code block, the line ```python import math ``` makes the math module available in our program. It is good style to put all `import` statements at the top of a file (or at the top of a cell when using a Jupyter notebook). The function call ```python x = math.sqrt(x) ``` says 'use the `sqrt` function from the `math` module to compute the square root'. By prefixing `sqrt` with `math`, we are using a *namespace* (which in this case is `math`). This makes clear precisely which `sqrt` function we want to use - there could be more than one `sqrt` function available. 
> *Namespaces:* The prefix '`math`' indicates which '`sqrt`' function we want to use. This might seem pedantic, but in practice there are often different algorithms for performing the same or similar operation. They might vary in speed and accuracy. In some applications we might need an accurate (but slow) method for computing the square root, while for other applications we might need speed with a compromise on accuracy. But, if two functions have the same name and are not distinguished by a name space, we have a *name clash*. > In a large program, two developers might choose the same name for two functions that perform similar but slightly different tasks. If these functions are in different modules, there will be no name clash since the module name provides a 'namespace' - a prefix that provides a distinction between the two functions. Namespaces are extremely helpful for multi-author programs. A weakness of older languages, like C and Fortran, is that they do not support namespaces. Most modern languages do support namespaces. We can import specific functions from a module, e.g. importing only the `sqrt` function:<jupyter_code>from math import sqrt x = 2.0 x = sqrt(x) print(x)<jupyter_output>1.4142135623730951 <jupyter_text>This way, we are importing (making available) only the `sqrt` function from the `math` module (the `math` module has a large number of functions). We can even choose to re-name functions that we import:<jupyter_code>from math import sqrt as some_math_function x = 2.0 x = some_math_function(x) print(x)<jupyter_output>1.4142135623730951 <jupyter_text>Renaming functions at import can be helpful to keep code short, and we will see below it can be useful for switching between different functions. However the above choice of name is very poor practice - the name '`some_math_function`' is not descriptive. Below is a more sensible example.Say we program a function that computes the roots of a quadratic function using the quadratic formula:<jupyter_code>from math import sqrt as square_root def compute_roots(a, b, c): "Compute roots of the polynomial f(x) = ax^2 + bx + c" root0 = (-b + square_root(b*b - 4*a*c))/(2*a) root1 = (-b - square_root(b*b - 4*a*c))/(2*a) return root0, root1 # Compute roots of f = 4x^2 + 10x + 1 root0, root1 = compute_roots(4, 10, 1) print(root0, root1)<jupyter_output>-0.10435607626104004 -2.3956439237389597 <jupyter_text>The above is fine as long as the polynomial has real roots. However, the function `math.sqrt` will give an error (technically, it will 'raise an exception') if a negative argument is passed to it. This is to stop naive programmers from making silly mistakes. We do know about complex numbers, so we want to compute complex roots. The Python module `cmath` provides functions for complex numbers. If we were to use `cmath.sqrt` to compute the square root, our function would support complex roots. 
We do this by importing the `cmath.sqrt` functions as `square_root`:<jupyter_code># Use the function from cmath as square_root to compute the square root # (this will replace the previously imported sqrt function) from cmath import sqrt as square_root # Compute roots (roots will be complex in this case) root0, root1 = compute_roots(40, 10, 1) print(root0, root1) # Compute roots (roots will be real in this case, but cmath.sqrt always returns a complex type) root0, root1 = compute_roots(4, 10, 1) print(root0, root1)<jupyter_output>(-0.125+0.09682458365518543j) (-0.125-0.09682458365518543j) (-0.10435607626104004+0j) (-2.3956439237389597+0j) <jupyter_text>The function now works for all cases because `square_root` is now using `cmath.sqrt`. Note that `cmath.sqrt` always returns a complex number type, even when the complex part is zero.# String functions and string formatting A standard function that we have used since the beginning is '`print`'. This function turns arguments into a string and displays the string to the screen. So far, we have only printed simple variables and relied mostly on the default conversions to a string for printing to the screen (the exception was printing the floating point representation of 0.1, where we needed to specify the number of significant digits to see the inexact representation in binary).## Formatting We can control how strings are formatted and displayed. Below is an example of inserting a string variable and a number variable into a string of characters:<jupyter_code># Format a string with name and age name = "Amber" age = 19 text_string = "My name is {} and I am {} years old.".format(name, age) # Print to screen print(text_string) # Short-cut for printing without assignment name = "Ashley" age = 21 print("My name is {} and I am {} years old.".format(name, age))<jupyter_output>My name is Amber and I am 19 years old. My name is Ashley and I am 21 years old. <jupyter_text>For floating-point numbers, we often want to control the formatting, and in particular the number of significant figures displayed. Using the display of $\pi$ as an example: <jupyter_code># Import math module to get access to math.pi import math # Default formatting print("The value of π using the default formatting is: {}".format(math.pi)) # Control number of significant figures in formatting print("The value of π to 5 significant figures is: {:.5}".format(math.pi)) print("The value of π to 8 significant figures is: {:.8}".format(math.pi)) print("The value of π to 20 significant figures and using scientific notation is: {:.20e}".format(math.pi))<jupyter_output>The value of π using the default formatting is: 3.141592653589793 The value of π to 5 significant figures is: 3.1416 The value of π to 8 significant figures is: 3.1415927 The value of π to 20 significant figures and using scientific notation is: 3.14159265358979311600e+00 <jupyter_text>There are many more ways in which float formatting can be controlled - search online if you want to format a float in a particular way. # Module example: parallel processing Standard modules can make very technical problems simpler. An example is parallel processing. The majority of CPUs - from phones to supercomputers - now have CPUs with multiple cores, with each core performing computations. To benefit from the multiple cores, we need to compute in *parallel*. A 'standard' program performs tasks in order, and in this case only one core will be utilised and the rest will remain idle. 
To get the best performance from the hardware, we need to compute in parallel. That is, we perform multiple tasks at the same time. Parallel processing is an enormous topic on its own, but we can touch upon it here because we have standard libraries that make it easy to use. Managing parallel tasks at a low-level is extremely technical, but standard libraries can make it easy. We will use the `multiprocessing` module, and use it to sort lists of numbers concurrently. We start by looking at how to generate a list of random integers using the `random` module. The following code creates a list (more on lists in the following notebook) of 10 random integers in the range 0 to 100 (not including 100):<jupyter_code>import random x = random.sample(range(0, 100), 10) print(x)<jupyter_output>[30, 9, 78, 59, 85, 81, 67, 97, 11, 91] <jupyter_text>To create a sorted list, we used the built-in function `sorted`:<jupyter_code>y = sorted(x) print(y)<jupyter_output>[9, 11, 30, 59, 67, 78, 81, 85, 91, 97] <jupyter_text>Now, if we need to sort multiple different lists, we could sort the lists one after the other, or we could sort several lists at the same time (in parallel). Our operating system will then manage the dispatch of the sorting task to different processor cores. Before seeing how to do this, we implement a function to perform the sorting:<jupyter_code>import multiprocessing import random def mysort(N): "Create a list of random numbers of length N, and return a sorted list" # Create random list x = random.sample(range(0, N), N) # Print process identifier (just out of interest) print("Process id: {}".format(multiprocessing.current_process())) # Return sorted list of numbers return sorted(x)<jupyter_output><empty_output><jupyter_text>To create the sorted lists, making available three processes (threads), we use:<jupyter_code>N = 20000 with multiprocessing.Pool(processes=3) as p: p.map(mysort, [N, N, N]) # Call function mysort three times<jupyter_output>Process id: <ForkProcess(ForkPoolWorker-3, started daemon)> Process id: <ForkProcess(ForkPoolWorker-2, started daemon)> Process id: <ForkProcess(ForkPoolWorker-1, started daemon)> <jupyter_text>We see from the output that three different processes have worked on our problem - one for each sorting task. We use parallel processing the make computations faster. Let's time our computation using different numbers of processes to see how it affects performance. To perform the timing, we first encapsulate our problem in a function:<jupyter_code>def parallel_sort(N, num_proc): "Create three lists of random numbers (each of length N) using num_proc processes" with multiprocessing.Pool(processes=num_proc) as p: p.map(mysort, [N, N, N])<jupyter_output><empty_output><jupyter_text>Using the magic command '[`%time`](Notebook%20tips.ipynb#Simple-timing)', we time the sorting using just one process (the one process sorts the lists one after the other):<jupyter_code>N = 500000 %time parallel_sort(N, 1) <jupyter_output>Process id: <ForkProcess(ForkPoolWorker-4, started daemon)> Process id: <ForkProcess(ForkPoolWorker-4, started daemon)> Process id: <ForkProcess(ForkPoolWorker-4, started daemon)> CPU times: user 56.6 ms, sys: 34 ms, total: 90.6 ms Wall time: 1.82 s <jupyter_text>We see from '`Process id`' that the same process worked on all three lists. 
We now try with up to 4 processes (there are only three lists to sort, so only three will be used):<jupyter_code>%time parallel_sort(N, 4) <jupyter_output>Process id: <ForkProcess(ForkPoolWorker-5, started daemon)> Process id: <ForkProcess(ForkPoolWorker-6, started daemon)> Process id: <ForkProcess(ForkPoolWorker-8, started daemon)> CPU times: user 46.9 ms, sys: 46.8 ms, total: 93.7 ms Wall time: 832 ms
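As a small follow-on to the example above: `Pool.map` also returns the results, so the sorted lists can be collected, and the timing can be done with the standard `time` module instead of the `%time` magic. The sketch repeats a results-returning `mysort` so the cell is self-contained:

```python
import multiprocessing
import random
import time


def mysort(N):
    "Create a list of N random numbers and return it sorted."
    x = random.sample(range(0, N), N)
    return sorted(x)


if __name__ == "__main__":
    N = 500000
    start = time.perf_counter()
    with multiprocessing.Pool(processes=3) as p:
        results = p.map(mysort, [N, N, N])  # a list of three sorted lists
    elapsed = time.perf_counter() - start
    print("Sorted {} lists of {} numbers in {:.2f} s".format(len(results), N, elapsed))
    print(results[0][:5], results[0][-5:])
```

The `if __name__ == "__main__"` guard is unnecessary inside a notebook on Linux, but it keeps the same code safe to run as a standalone script on platforms that spawn rather than fork worker processes.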
non_permissive
/PartIA-Computing-Michaelmas/05 Library functions.ipynb
tommypratama/ds
14
<jupyter_start><jupyter_text> # Working with a real world data-set using SQL and Python Estimated time needed: **30** minutes ## Objectives After completing this lab you will be able to: - Understand the dataset for Chicago Public School level performance - Store the dataset in an Db2 database on IBM Cloud instance - Retrieve metadata about tables and columns and query data from mixed case columns - Solve example problems to practice your SQL skills including using built-in database functions ## Chicago Public Schools - Progress Report Cards (2011-2012) The city of Chicago released a dataset showing all school level performance data used to create School Report Cards for the 2011-2012 school year. The dataset is available from the Chicago Data Portal: [https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t](https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) This dataset includes a large number of metrics. Start by familiarizing yourself with the types of metrics in the database: [https://data.cityofchicago.org/api/assets/AAD41A13-BE8A-4E67-B1F5-86E711E09D5F?download=true](https://data.cityofchicago.org/api/assets/AAD41A13-BE8A-4E67-B1F5-86E711E09D5F?download=true&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) **NOTE**: Do not download the dataset directly from City of Chicago portal. Instead download a static copy which is a more database friendly version from this link. Now review some of its contents. ### Store the dataset in a Table In many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. To analyze the data using SQL, it first needs to be stored in the database. While it is easier to read the dataset into a Pandas dataframe and then PERSIST it into the database as we saw in the previous lab, it results in mapping to default datatypes which may not be optimal for SQL querying. For example a long textual field may map to a CLOB instead of a VARCHAR. Therefore, **it is highly recommended to manually load the table using the database console LOAD tool, as indicated in Week 2 Lab 1 Part II**. The only difference with that lab is that in Step 5 of the instructions you will need to click on create "(+) New Table" and specify the name of the table you want to create and then click "Next". ##### Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the CHICAGO PUBLIC SCHOOLS dataset and load the dataset into a new table called **SCHOOLS**. 
### Connect to the database Let us now load the ipython-sql extension and establish a connection with the database <jupyter_code>%load_ext sql # Enter the connection string for your Db2 on Cloud database instance below # %sql ibm_db_sa://my-username:my-password@my-hostname:my-port/my-db-name %sql ibm_db_sa://hxf94385:ccns84zmbdz5bs%[email protected]:50000/BLUDB<jupyter_output><empty_output><jupyter_text>### Query the database system catalog to retrieve table metadata ##### You can verify that the table creation was successful by retrieving the list of all tables in your schema and checking whether the SCHOOLS table was created <jupyter_code># type in your query to retrieve list of all tables in the database for your db2 schema (username) %sql select * from SYSCAT.TABLES where TABNAME = 'SCHOOLS' <jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python #In Db2 the system catalog table called SYSCAT.TABLES contains the table metadata %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES where TABSCHEMA='YOUR-DB2-USERNAME' or, you can retrieve list of all tables where the schema name is not one of the system created ones: %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES \ where TABSCHEMA not in ('SYSIBM', 'SYSCAT', 'SYSSTAT', 'SYSIBMADM', 'SYSTOOLS', 'SYSPUBLIC') or, just query for a specifc table that you want to verify exists in the database %sql select * from SYSCAT.TABLES where TABNAME = 'SCHOOLS' ``` ### Query the database system catalog to retrieve column metadata ##### The SCHOOLS table contains a large number of columns. How many columns does this table have? <jupyter_code># type in your query to retrieve the number of columns in the SCHOOLS table %sql select count(*) from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS'<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python #In Db2 the system catalog table called SYSCAT.COLUMNS contains the column metadata %sql select count(*) from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS' ``` Now retrieve the the list of columns in SCHOOLS table and their column type (datatype) and length. <jupyter_code># type in your query to retrieve all column names in the SCHOOLS table along with their datatypes and length %sql select COLNAME, TYPENAME, LENGTH from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS' <jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python %sql select COLNAME, TYPENAME, LENGTH from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS' or %sql select distinct(NAME), COLTYPE, LENGTH from SYSIBM.SYSCOLUMNS where TBNAME = 'SCHOOLS' ``` ### Questions 1. Is the column name for the "SCHOOL ID" attribute in upper or mixed case? 2. What is the name of "Community Area Name" column in your table? Does it have spaces? 3. Are there any columns in whose names the spaces and paranthesis (round brackets) have been replaced by the underscore character "_"? ## Problems ### Problem 1 ##### How many Elementary Schools are in the dataset? <jupyter_code>%sql select count(*) from SCHOOLS WHERE "Elementary, Middle, or High School" = 'ES'<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. 
<jupyter_text>Click here for the hint ```python Which column specifies the school type e.g. 'ES', 'MS', 'HS'? Does the column name have mixed case, spaces or other special characters? If so, ensure you use double quotes around the "Name of the Column" ``` Click here for the solution ```python %sql select count(*) from SCHOOLS where "Elementary, Middle, or High School" = 'ES' Correct answer: 462 ``` ### Problem 2 ##### What is the highest Safety Score? <jupyter_code>%sql select max("SAFETY_SCORE") AS SAFETY_SCORE from SCHOOLS<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python # Use the MAX() function %sql select MAX(Safety_Score) AS MAX_SAFETY_SCORE from SCHOOLS Correct answer: 99 ``` ### Problem 3 ##### Which schools have highest Safety Score? <jupyter_code>%sql select NAME_OF_SCHOOL, SAFETY_SCORE from SCHOOLS where SAFETY_SCORE = ( select max("SAFETY_SCORE") AS SAFETY_SCORE from SCHOOLS)<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python # In the previous problem we found out that the highest Safety Score is 99, so we can use that as an input in the where clause: %sql select Name_of_School, Safety_Score from SCHOOLS where Safety_Score = 99 or, a better way: %sql select Name_of_School, Safety_Score from SCHOOLS where \ Safety_Score= (select MAX(Safety_Score) from SCHOOLS) Correct answer: several schools with with Safety Score of 99. ``` ### Problem 4 ##### What are the top 10 schools with the highest "Average Student Attendance"? <jupyter_code>%sql select NAME_OF_SCHOOL, AVERAGE_STUDENT_ATTENDANCE from SCHOOLS ORDER BY AVERAGE_STUDENT_ATTENDANCE DESC LIMIT 10<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python %sql select Name_of_School, Average_Student_Attendance from SCHOOLS \ order by Average_Student_Attendance desc nulls last limit 10 ``` ### Problem 5 ##### Retrieve the list of 5 Schools with the lowest Average Student Attendance sorted in ascending order based on attendance <jupyter_code>%sql select NAME_OF_SCHOOL, AVERAGE_STUDENT_ATTENDANCE from SCHOOLS ORDER BY AVERAGE_STUDENT_ATTENDANCE ASC LIMIT 5<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python %sql SELECT Name_of_School, Average_Student_Attendance \ from SCHOOLS \ order by Average_Student_Attendance \ fetch first 5 rows only ``` ### Problem 6 ##### Now remove the '%' sign from the above result set for Average Student Attendance column <jupyter_code>%sql select NAME_OF_SCHOOL, REPLACE( AVERAGE_STUDENT_ATTENDANCE, '%', '') from SCHOOLS ORDER BY AVERAGE_STUDENT_ATTENDANCE ASC LIMIT 5<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python #Use the REPLACE() function to replace '%' with '' #See documentation for this function at: https://www.ibm.com/support/knowledgecenter/en/SSEPGG_10.5.0/com.ibm.db2.luw.sql.ref.doc/doc/r0000843.html %sql SELECT Name_of_School, REPLACE(Average_Student_Attendance, '%', '') \ from SCHOOLS \ order by Average_Student_Attendance \ fetch first 5 rows only ``` ### Problem 7 ##### Which Schools have Average Student Attendance lower than 70%? 
<jupyter_code>%sql select NAME_OF_SCHOOL, REPLACE( AVERAGE_STUDENT_ATTENDANCE, '%', '') from SCHOOLS where AVERAGE_STUDENT_ATTENDANCE < '70'<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the hint ```python The datatype of the "Average_Student_Attendance" column is varchar. So you cannot use it as is in the where clause for a numeric comparison. First use the CAST() function to cast it as a DECIMAL or DOUBLE e.g. CAST("Column_Name" as DOUBLE) or simply: DECIMAL("Column_Name") Don't forget the '%' age sign needs to be removed before casting ``` Click here for the solution ```python %sql SELECT Name_of_School, Average_Student_Attendance \ from SCHOOLS \ where CAST ( REPLACE(Average_Student_Attendance, '%', '') AS DOUBLE ) < 70 \ order by Average_Student_Attendance or, %sql SELECT Name_of_School, Average_Student_Attendance \ from SCHOOLS \ where DECIMAL ( REPLACE(Average_Student_Attendance, '%', '') ) < 70 \ order by Average_Student_Attendance ``` ### Problem 8 ##### Get the total College Enrollment for each Community Area <jupyter_code>%sql select sum(COLLEGE_ENROLLMENT) as TOTAL_ENROLLMENT , COMMUNITY_AREA_NAME from SCHOOLS GROUP BY COMMUNITY_AREA_NAME <jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the hint ```python Verify the exact name of the Enrollment column in the database Use the SUM() function to add up the Enrollments for each Community Area Don't forget to group by the Community Area ``` Click here for the solution ```python %sql select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ from SCHOOLS \ group by Community_Area_Name ``` ### Problem 9 ##### Get the 5 Community Areas with the least total College Enrollment sorted in ascending order <jupyter_code>%sql select sum(COLLEGE_ENROLLMENT) as TOTAL_ENROLLMENT , COMMUNITY_AREA_NAME from SCHOOLS GROUP BY COMMUNITY_AREA_NAME ORDER BY TOTAL_ENROLLMENT ASC LIMIT 5 <jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. <jupyter_text>Click here for the solution ```python # Order the previous query and limit the number of rows you fetch %sql select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ from SCHOOLS \ group by Community_Area_Name \ order by TOTAL_ENROLLMENT asc \ fetch first 5 rows only ``` ### Problem 10 ##### Get the hardship index for the community area which has College Enrollment of 4368 <jupyter_code>%%sql select hardship_index from chicago_socioeconomic_data C, schools S where C.ca = S.community_area_number and college_enrollment = 4368<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done. 
<jupyter_text>Click here for the solution ```python # For this solution to work the CHICAGO_SOCIOECONOMIC_DATA table as created in the last lab of Week 3 should already exist %%sql select hardship_index from chicago_socioeconomic_data CD, schools CPS where CD.ca = CPS.community_area_number and college_enrollment = 4368 ``` ### Problem 11 ##### Get the hardship index for the community area which has the highest value for College Enrollment <jupyter_code>%sql select ca, community_area_name, hardship_index from chicago_socioeconomic_data \ where ca in \ ( select community_area_number from schools order by college_enrollment desc limit 1 )<jupyter_output> * ibm_db_sa://hxf94385:***@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB Done.
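For readers who prefer explicit join syntax, the last two problems can also be written with an ANSI `JOIN` (a sketch assuming the same CHICAGO_SOCIOECONOMIC_DATA and SCHOOLS tables exist in your schema):

```python
%%sql
select CD.ca, CD.community_area_name, CD.hardship_index
from chicago_socioeconomic_data CD
join schools CPS
  on CD.ca = CPS.community_area_number
order by CPS.college_enrollment desc
limit 1
```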
no_license
/data-set using SQL and Python.ipynb
jessicambs/python
15
<jupyter_start><jupyter_text># make url dictionary<jupyter_code>work_path = os.open('...',os.O_RDONLY) files = os.listdir(work_path) files= sorted(files) files = files[1:] end_bid = files.index('...') files = files[:end_bid] files.remove('...') work_path = '...' url_dict = {} url_id = 0 for file in files: temp_csv = pd.read_csv(work_path + file, sep = '\t', header = None, compression='gzip') for url in temp_csv[4].unique(): if url not in url_dict: url_dict[url] = url_id url_id = url_id+1 del temp_csv print(file[-11:-7], end= ' ') np.save('url_dict.npy', url_dict) paring_table_path = '...' paring_table = pd.read_csv(paring_table_path, compression='gzip') paring_table = paring_table.dropna() uuid_list = paring_table.uuid.values vuid_list = paring_table.vuid.values uuid_dict = {} for i in range(len(uuid_list)): uuid_dict[uuid_list[i]] = vuid_list[i] np.save('id_dict.npy', uuid_dict) id_dict['U-uWwcCo3j8AAONDFIcAAABd']<jupyter_output><empty_output><jupyter_text># Analysis for Url<jupyter_code>temp_csv = pd.read_csv(work_path + '...', sep = '\t', header = None, compression='gzip') temp_csv2 = pd.read_csv(work_path + '...', sep = '\t', header = None, compression='gzip') _view_cnt = pd.DataFrame(temp_csv2[2].value_counts()) _view_cnt = uu_view_cnt.reset_index() _view_cnt.columns = ['id', 'view_count'] uu_merge = uu_view_cnt.merge(paring_table, left_on='id', right_on='id', how='inner') mmp = temp_csv.loc[temp_csv[2] == 'WMvzpsCo4VcAAC3QP6oAAAAA'][4].value_counts() sns.distplot(np.log(1+temp_csv[2].value_counts().values), rug=False, hist=False, color='r', label='Wednesday') # Wednesday sns.distplot(np.log(1+temp_csv2[2].value_counts().values), rug=False, hist=False, label='sunday') # sunday plt.plot([np.log(1 + 500),np.log(1 + 500)], [0.0,0.25],'g') plt.xlabel('np.log(1+ #of_view)') plt.ylabel('density of probability') sns.distplot(np.log10(mmp).values, rug=False, hist=False) h1 = plt.hist(temp_csv[2].value_counts().values, bins=200,normed=True) plt.clf() plt.plot(h1[1][:-1], np.add.accumulate(h1[0])/np.sum(h1[0])) url_dict = np.load('url_dict.npy') df_dict = url_dict.item() temp_csv = temp_csv.iloc[:, [2,4]] temp_csv = temp_csv.dropna() temp_csv[4] = temp_csv[4].map(lambda x: df_dict[x]) temp_csv[4] = temp_csv[4].map(lambda x: str(x)) df_session = temp_csv.groupby(2).apply(lambda x: ' '.join(list(x[4]))) df_session = pd.DataFrame(df_session) df_session = df_session.reset_index() df_session.columns = ['uuid', 'url_session'] df_session.url_session = df_session.url_session.map(lambda x: x.split(' ')) df_session['session_length'] = df_session.url_session.map(lambda x: len(x)) df_order = pd.read_csv('' + 'pay_order.tsv.gz', sep = '\t', compression='gzip') df_order_user_list = df_order.account_id.value_counts() df_order_user_list = pd.DataFrame(df_order_user_list) df_order_user_list = df_order_user_list.reset_index() df_order_user_list.columns = ['account_id','purchase_amount'] df_merge = paring_table.merge(df_order_user_list, left_on='vuid', right_on='account_id', how='inner') df_merge = df_merge[['id', 'id', 'purchase_amount']] df_finel = df_merge.merge(df_session, left_on='id', right_on='id', how='inner') df_finel.shape, df_session.shape df_finel.session_length.values sns.distplot(np.log(1 + df_finel.session_length.values), rug=False, hist=False) len(df_finel.loc[df_finel.session_length >=500]) len(df_finel.loc[df_finel.session_length >=1000]) df_finel.loc[df_finel.session_length>=5].shape temp_csv.loc[temp_csv[2] == 'Vy53x8Co4U8AAAE8xb8AAAAA'][4].value_counts().values<jupyter_output><empty_output>
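The view-count step above mixes the names `_view_cnt` and `uu_view_cnt`; here is a small self-contained sketch of the intended pattern on toy data (the column names and the merge key are assumptions based on the surrounding code):

```python
import pandas as pd

# Toy stand-ins for the log dataframe (column 2 = user id) and the uuid pairing table
logs = pd.DataFrame({2: ["u1", "u1", "u2", "u1", "u3"]})
paring = pd.DataFrame({"id": ["u1", "u2", "u3"], "vuid": ["v1", "v2", "v3"]})

# Count page views per user and give the result explicit column names
view_cnt = logs[2].value_counts().rename_axis("id").reset_index(name="view_count")

# Attach the pairing information; the inner join keeps only users present in both tables
uu_merge = view_cnt.merge(paring, on="id", how="inner")
print(uu_merge)
```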
no_license
/url_dict&analysisSSdata.ipynb
lzzscl/LSTM_RS_corss_domain
2
<jupyter_start><jupyter_text>### Balancing the class using SMOTE<jupyter_code>X = df2.loc[:, df2.columns != 'target'] y = df2.loc[:, df2.columns == 'target'] from imblearn.over_sampling import SMOTE os=SMOTE(random_state=0) X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0) columns=X_train.columns os_data_X,os_data_y=os.fit_sample(X_train,y_train) os_data_X=pd.DataFrame(data=os_data_X,columns=columns) os_data_y=pd.DataFrame(data=os_data_y,columns=['target']) #lets check the numer of data oversampeled print("length of oversampled data is ",len(os_data_X)) print("Number of lost deals in oversampled data",len(os_data_y[os_data_y['target']==0])) print("Number of won deals",len(os_data_y[os_data_y['target']==1])) print("Proportion of lost deal data in oversampled data is ",len(os_data_y[os_data_y['target']==0])/len(os_data_X)) print("Proportion of won deal data in oversampled data is ",len(os_data_y[os_data_y['target']==1])/len(os_data_X))<jupyter_output>length of oversampled data is 8702 Number of lost deals in oversampled data 4351 Number of won deals 4351 Proportion of lost deal data in oversampled data is 0.5 Proportion of won deal data in oversampled data is 0.5 <jupyter_text>### Recursive Feature Elimination ###### REF is based on the idea to repeatedly construct a model and choose either the best or worst performing feature, setting the feature aside and then repeating the process with the rest of the features. This process is applied until all features in the dataset are exhausted. The goal of RFE is to select features by recursively considering smaller and smaller sets of features.<jupyter_code>data_final_vars=df2.columns.values.tolist() y=['target'] X=[i for i in data_final_vars if i not in y] from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression logreg = LogisticRegression() rfe = RFE(logreg, 20) rfe = rfe.fit(os_data_X, os_data_y.values.ravel()) print(rfe.support_) print(rfe.ranking_) import statsmodels.api as sm logit_model=sm.Logit(y,X) result=logit_model.fit() print(result.summary2())<jupyter_output>Optimization terminated successfully. Current function value: 0.630664 Iterations 6 Results: Logit ================================================================================================ Model: Logit Pseudo R-squared: 0.046 Dependent Variable: target AIC: 12708.9226 Date: 2021-02-12 18:57 BIC: 13199.0078 No. Observations: 9968 Log-Likelihood: -6286.5 Df Model: 67 LL-Null: -6592.4 Df Residuals: 9900 LLR p-value: 6.0306e-89 Converged: 1.0000 Scale: 1.0000 No. 
Iterations: 6.0000 [...]<jupyter_text>#### The p-values analysis <jupyter_code>cols=['Client Category','Solution Type','VP Name','Manager Name','year'] X=os_data_X[cols] y=os_data_y['target'] logit_model=sm.Logit(y,X) result=logit_model.fit() print(result.summary2()) <jupyter_output><empty_output><jupyter_text>### Logistic Regression Model Fitting<jupyter_code>from sklearn.linear_model import LogisticRegression from sklearn import metrics X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0) logreg=LogisticRegression() logreg.fit(X_train,y_train) <jupyter_output><empty_output><jupyter_text>### Predicting the test set results and calculating the accuracy<jupyter_code>y_pred=logreg.predict(X_test) print('accuracy:',logreg.score(X_test,y_test)) #Confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix = confusion_matrix(y_test, y_pred) print(confusion_matrix)<jupyter_output>[[738 550] [644 679]] <jupyter_text>#### The result is telling us that we have [738+679] correct predictions and [644+550] incorrect predictions.<jupyter_code>from sklearn.metrics import classification_report print(classification_report(y_test, y_pred))<jupyter_output> precision recall f1-score support 0 0.53 0.57 0.55 1288 1 0.55 0.51 0.53 1323 accuracy 0.54 2611 macro avg 0.54 0.54 0.54 2611 weighted avg 0.54 0.54 0.54 2611 <jupyter_text>#### ROC curve<jupyter_code>from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test)) fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1]) plt.figure() plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") plt.savefig('Log_ROC') plt.show()<jupyter_output><empty_output>
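The accuracy and ROC numbers above are computed on a split of the already-oversampled data, so synthetic SMOTE rows leak into the test set. A minimal sketch of the safer pattern on toy data (using imblearn's pipeline, so oversampling only ever happens when fitting on the training fold; the toy dataset is an assumption, not the deals data):

```python
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.metrics import classification_report

# Toy imbalanced dataset standing in for the deals data
X, y = make_classification(n_samples=2000, weights=[0.9, 0.1], random_state=0)

# Split first, so the test fold stays purely real data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)

# SMOTE runs inside the pipeline, i.e. only on the data the model is fitted with
model = Pipeline([("smote", SMOTE(random_state=0)),
                  ("logreg", LogisticRegression(max_iter=1000))])
model.fit(X_train, y_train)
print(classification_report(y_test, model.predict(X_test)))
```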
no_license
/Win_Prediction_analysis.ipynb
Neetika-sys/Win-Prediction-Analytics
7
<jupyter_start><jupyter_text># Fictitious Names### Introduction: This time you will create a data again Special thanks to [Chris Albon](http://chrisalbon.com/) for sharing the dataset and materials. All the credits to this exercise belongs to him. In order to understand about it go [here](https://blog.codinghorror.com/a-visual-explanation-of-sql-joins/). ### Step 1. Import the necessary libraries<jupyter_code>import pandas as pd import numpy as np<jupyter_output><empty_output><jupyter_text>### Step 2. Create the 3 DataFrames based on the following raw data<jupyter_code>raw_data_1 = { 'subject_id': ['1', '2', '3', '4', '5'], 'first_name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'], 'last_name': ['Anderson', 'Ackerman', 'Ali', 'Aoni', 'Atiches']} raw_data_2 = { 'subject_id': ['4', '5', '6', '7', '8'], 'first_name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'], 'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan']} raw_data_3 = { 'subject_id': ['1', '2', '3', '4', '5', '7', '8', '9', '10', '11'], 'test_id': [51, 15, 15, 61, 16, 14, 15, 1, 61, 16]}<jupyter_output><empty_output><jupyter_text>### Step 3. Assign each to a variable called data1, data2, data3<jupyter_code>data1 = pd.DataFrame(raw_data_1) data2 = pd.DataFrame(raw_data_2) data3 = pd.DataFrame(raw_data_3)<jupyter_output><empty_output><jupyter_text>### Step 4. Join the two dataframes along rows and assign all_data<jupyter_code>all_data = pd.concat([data1, data2]) all_data<jupyter_output><empty_output><jupyter_text>### Step 5. Join the two dataframes along columns and assing to all_data_col<jupyter_code>all_data_col = pd.concat([data1, data2], axis=1) all_data_col<jupyter_output><empty_output><jupyter_text>### Step 6. Print data3<jupyter_code>data3<jupyter_output><empty_output><jupyter_text>### Step 7. Merge all_data and data3 along the subject_id value<jupyter_code>pd.merge(all_data, data3, on='subject_id')<jupyter_output><empty_output><jupyter_text>### Step 8. Merge only the data that has the same 'subject_id' on both data1 and data2<jupyter_code>pd.merge(data1, data2, on='subject_id', how='inner')<jupyter_output><empty_output><jupyter_text>### Step 9. Merge all values in data1 and data2, with matching records from both sides where available.<jupyter_code>pd.merge(data1, data2, on='subject_id', how='outer')<jupyter_output><empty_output>
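As a small extension of steps 8 and 9, the `indicator` argument makes the join provenance explicit (a short sketch reusing `data1` and `data2` defined above):

```python
# The _merge column records whether each subject_id matched in data1, data2, or both
merged = pd.merge(data1, data2, on='subject_id', how='outer', indicator=True)
print(merged['_merge'].value_counts())
```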
no_license
/05_Merge/Fictitous Names/Exercises.ipynb
yuramayer/pandas_ex
9
<jupyter_start><jupyter_text>## Identifying the Dataset to be worked upon for the Project: Identifying the Key that corresponds to the Table name to be invoked:<jupyter_code>import hashlib hashlib.md5("hrs170003".encode('utf-8')).hexdigest() import pandas as pd pd.read_csv("Project1Dataset.csv")<jupyter_output><empty_output><jupyter_text>Using the 'project.csv' file provided, the above key corresponds to the 'cps91' table from the 'wooldridge2.db' database. Hence, importing that table from the database:<jupyter_code>import sqlite3 con = sqlite3.connect('wooldridge2.db') dataset = pd.read_sql('SELECT * FROM cps91',con) print(dataset) dataset.to_csv("Project1Dataset.csv")<jupyter_output> index husage husunion husearns huseduc husblck hushisp hushrs \ 0 0 42 0.0 568 14 0 0 40 1 1 26 0.0 600 14 0 0 0 2 2 56 0.0 1500 14 0 0 40 3 3 35 NaN 0 12 0 0 40 4 4 42 0.0 450 11 0 0 45 5 5 55 0.0 465 13 0 0 50 6 6 68 NaN 0 16 0 0 0 7 7 48 0.0 492 10 0 1 48 8 8 38 NaN 0 14 0 0 65 9 9 48 1.0 1923 12 0 0 21 10 10 56 0.0 450 12 0 0 40 11 11 37 0.0 523 12 0 [...]<jupyter_text>## Data dictionary: ![dataDictForProject1.jpg](attachment:dataDictForProject1.jpg)## Dataset cleansing and Data Preprocessing: To be able to perform any sort of Exploratory Data Analysis, the dataset needs to be cleansed off unnecessary columns, redundant columns, missing values, etc.<jupyter_code>print(dataset.columns) # dropping the 'index' and 'expersq' columns because they are unnecessary for analysis here: dataset = dataset.drop('index', axis=1) dataset = dataset.drop('expersq', axis=1) print(dataset.columns) # taking only those rows into consideration which do not have a missing value cleansed_data = dataset[(dataset.husunion.notnull()) & (dataset.union.notnull()) & (dataset.hrwage.notnull())] cleansed_data.to_csv("Project1Dataset.csv") cleansed_data.head()<jupyter_output><empty_output><jupyter_text>## Exploratory Data Analysis: Let us first find out the statistical summary of only the numerical variables in our dataset. Some of the things that this summary shall talk about are the means, minimum values, maximum values, standard deviations, 25th-50th-75th quartiles of the data in the various columns of the dataset, etc.<jupyter_code># statistical summary using .describe() cleansed_data[['age','husage','educ','huseduc','exper','husexp','hours','hushrs','earns','husearns','faminc','nwifeinc']].describe()<jupyter_output><empty_output><jupyter_text>### Statistical Inferences: The following inferences are made from the above statistical summary: 1. The mean values, minimum values and maximum values of age of a wife and a husband is not very different. 2. Even though the wife and the husband has the same average number of years of educations and almost the same years of experience, the wife earns far less than what the husband earns weekly. This may be because a husband works more hours in a week than a wife does. 3. A non-wife earns far more annually than a wife does.### Bar Plots: The following two plots show the age-wise distribution of wives and husbands respectively:<jupyter_code>import matplotlib.pyplot as plt import numpy as np data = pd.read_csv("Project1Dataset.csv") # most women are between ages 30 and 40 plt.hist(data.age, bins=6, color='blue') plt.xlabel('Age groups') plt.ylabel('No. of women') plt.xticks(range(10,70,10)) plt.yticks(range(0,800,100)) plt.title('Wives age distribution') plt.show() # most men are between ages 35 and 45 plt.hist(data.husage, bins=6) plt.xlabel('Age groups') plt.ylabel('No. 
of men') plt.xticks(range(10,70,10)) plt.yticks(range(0,900,100)) plt.title('Husbands age distribution') plt.show()<jupyter_output><empty_output><jupyter_text>###### Box plot Inferences: 1. The modal age group for women is 30-40 years of age, which is also the class group where their mean lies (~ 38 years of age) 2. The modal age group for men is 35-45 years of age, which is also the class group where their mean lies (~ 40 years of age) 3. We can also see from the following plot that about 1/3rd (~800 wives and husbands) of the working population lies between the age of 35-45 years.<jupyter_code># most of the working wives and husbands lie in the age group of 35-45 years w_age = data['age'] h_age = data['husage'] legend = ['Women', 'Men'] plt.hist([w_age,h_age], color=['blue','red'], bins=6) plt.xlabel('Age groups') plt.ylabel('No. of People') plt.legend(legend) plt.xticks(range(15,76,10)) plt.yticks(range(0,1000,100)) plt.title('Age-wise distribution plot') plt.show()<jupyter_output><empty_output><jupyter_text>###### -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Comparative Analysis and Inference: - The following plots of comparison show that a majority of women have obtained more years of education than men. - They also have equal or more experience than men. - Yet, most men earn more than women do. - Also, husbands contribute more to the family financially than wives do.<jupyter_code># most of the wives have more years of education than men do w_educ = data['educ'] h_educ = data['huseduc'] legend = ['Women', 'Men'] plt.hist([w_educ,h_educ], color=['blue','red'], bins=10) plt.xlabel('No. of years of Education') plt.ylabel('No. of People') plt.legend(legend) plt.xticks(range(0,20,2)) plt.yticks(range(0,1200,100)) plt.title('Gender-wise Education years') plt.show() # most of the wives have more years of experience than men do w_exp = data['exper'] h_exp = data['husexp'] legend = ['Women', 'Men'] plt.hist([w_exp,h_exp], color=['blue','red'], bins=6) plt.xlabel('No. of years of Experience') plt.ylabel('No. 
of People') plt.legend(legend) plt.xticks(range(0,60,10)) plt.yticks(range(0,1000,100)) plt.title('Gender-wise Experience years') plt.show() # yet, most husbands earn more than wives do w_earns = data['earns'] h_earns = data['husearns'] legend = ['Wife', 'Husband'] plt.hist([w_earns,h_earns], color=['blue','red']) plt.xlabel('Weekly earnings') plt.ylabel('Frequeancy') plt.legend(legend) plt.xticks(range(0,3000,250)) plt.yticks(range(0,1500,250)) plt.title('Gender-wise earnings') plt.show() # Husbands contribute more to the family financially than wives do hus_contri = pd.read_csv('Project1Dataset.csv')['husearns'].mean()*52 wife_contri = pd.read_csv('Project1Dataset.csv')['earns'].mean()*52 nWife_contri = pd.read_csv('Project1Dataset.csv')['nwifeinc'].mean()*1000 labels = 'Husband', 'Wife', 'Non-wife' sizes = [hus_contri,wife_contri,nWife_contri] sizes.sort(reverse=True) explode = (0.1, 0, 0) fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') plt.title('Financial contribution to the family') plt.show()<jupyter_output><empty_output><jupyter_text>### Correlation matrix: The following correlation matrix is plotted to identify strong relations among the different variables in the dataset.<jupyter_code>data = pd.read_csv('Project1Dataset.csv') corr = data.corr() corr.style.background_gradient(cmap='coolwarm').set_precision(2)<jupyter_output><empty_output><jupyter_text>###### According to the correlation matrix above, the following inferences are made: 1. There is a strong relation between the wife's and the husband's age (~0.89-0.90). This indicates that women generally get married to men who are close to their age. 2. There is a strong relation between the race of the wives and their husbands. This means that women generally marry men who belong to their race: - black couple(~ 0.95) - hispanic couple(~ 0.76) (refer graph plots given below) 3. The couple's experience and their age shows a direct relation (~ 0.97). 4. There is a strong relationship between the family income (wife's income + husband's income) and the non-wife's income(~ 0.91). This indicates that those families where the couple's income is low/zero, are financially dependant on the non-wife's income.<jupyter_code># women marry men of their same race data = pd.read_csv("Project1Dataset.csv") w_black = data['black'] h_black = data['husblck'] legend = ['Black woman', 'Black man'] plt.hist([w_black,h_black], color=['blue','red'], bins=2) plt.xlabel('Black Couple') plt.ylabel('No. of Couples') plt.legend(legend) plt.xticks(range(0,2)) plt.yticks(range(0,2)) plt.title('Black couples') plt.show() w_hisp = data['hispanic'] h_hisp = data['hushisp'] legend = ['Hispanic woman', 'Hispanic man'] plt.hist([w_hisp,h_hisp], color=['blue','red'], bins=2) plt.xlabel('Hispanic couple') plt.ylabel('No. of Couples') plt.legend(legend) plt.xticks(range(0,2)) plt.yticks(range(0,2)) plt.title('Hispanic Couple') plt.show()<jupyter_output><empty_output><jupyter_text>It can be seen clearly that women usually marry men who belong to their same race. 
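The pairwise relationships called out above can also be ranked programmatically rather than read off the heatmap (a short sketch reusing the `corr` matrix computed above):

```python
import numpy as np

# Keep only the upper triangle so each pair appears once, then rank by absolute correlation
upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
print(upper.stack().abs().sort_values(ascending=False).head(10))
```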
###### -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ### Linear Regression to predict Wife's income: Based on the correlation matrix, we can predict the wife's income using a wife's years of education and years of experience as predictor variables<jupyter_code>from sklearn.linear_model import LinearRegression from pandas import DataFrame from matplotlib.pyplot import * from scipy.interpolate import * import numpy style.use('ggplot') data = pd.read_csv('Project1Dataset.csv') X2 = data.iloc[:2000, 12].values.reshape(-1, 1) X3 = data.iloc[:2000, 17].values.reshape(-1, 1) Y = data.iloc[:2000, 9].values.reshape(-1, 1) # -1 to calculate the dimension of rows, but 1 column X_pred2= data.iloc[2000:,12].values.reshape(-1, 1) X_pred3 = data.iloc[2000:,17].values.reshape(-1, 1) X = pd.DataFrame(X2) X['exper'] = X3 X_pred = pd.DataFrame(X_pred2) X_pred['exper'] = X_pred3 Y = pd.DataFrame(Y) Y_real = data.iloc[2000:,9].values.reshape(-1, 1) linear_regressor = LinearRegression() # object of class linear_regressor.fit(X, Y) Y_pred = linear_regressor.predict(X_pred) new = pd.DataFrame(X_pred) new['Predicted income for wife'] = Y_pred new['Wife current earnings'] = Y_real new['difference'] = Y_pred - Y_real Y_pred = pd.DataFrame(Y_pred) Y_real = pd.DataFrame(Y_real) x = Y_pred y = Y_real x = numpy.array(x[0]) y = numpy.array(y[0]) z = np.polyfit(x, y, 1) p = np.poly1d(z) plt.plot(x,p(x),"r--") plt.scatter(x, y, c='blue') plt.show()<jupyter_output><empty_output>
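The fitted line above is only inspected visually; a short sketch of how the fit could be scored (reusing `linear_regressor`, `X`, `Y`, `Y_pred` and `Y_real` from the cell above):

```python
from sklearn.metrics import mean_absolute_error

# R^2 on the rows used for fitting, and mean absolute error on the held-out rows
print("Train R^2:", linear_regressor.score(X, Y))
print("Holdout MAE:", mean_absolute_error(Y_real, Y_pred))
```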
no_license
/MarriedWomenIncomePrediction .ipynb
drishti255/MarriedWomenIncomePrediction
10
<jupyter_start><jupyter_text># Series2Graph Demo This notebook describe and display all the step that series2graph preforms in order to detect abnormal subsequences in a time series.<jupyter_code>import matplotlib.pyplot as plt from series2graph import *<jupyter_output><empty_output><jupyter_text>## Demo on an Synthetic time series The full process of Time2graph applied on a synthetic time series corresponding to a sinusoid pattern with gaussian noise added to a random walk trend.<jupyter_code>import pandas as pd df = pd.read_csv("../DATA/Synthetic.ts",header=None) print("Time Series Statistics:") print("Number of points: {}".format(len(df)))<jupyter_output>Time Series Statistics: Number of points: 112000 <jupyter_text>## Parameters setting<jupyter_code>pattern_length = 75 query_length = 100<jupyter_output><empty_output><jupyter_text>## Computing the Graph<jupyter_code>s2g = Series2Graph(pattern_length=pattern_length) s2g.fit(df) print("Graph Statistics:") print("Number of nodes: {}".format(s2g.graph['Graph'].number_of_nodes())) print("Number of edges: {}".format(s2g.graph['Graph'].number_of_edges()))<jupyter_output>Graph Statistics: Number of nodes: 73 Number of edges: 268 <jupyter_text>### Visualization of the embedding space<jupyter_code>plt.figure(figsize=(10,10)) plt.plot(s2g.graph['proj_A']['0'],s2g.graph['proj_A']['1']) plt.title("SProj(T,l,lambda)")<jupyter_output><empty_output><jupyter_text>### Visualization of the graph<jupyter_code>s2g.plot_graph()<jupyter_output><empty_output><jupyter_text>## Anomalies detection<jupyter_code>s2g.score(query_length)<jupyter_output><empty_output><jupyter_text>### Visualization of the full time series<jupyter_code>fig,ax = plt.subplots(2,1,figsize=(20,4)) ax[0].plot(df[0].values[0:len(s2g.all_score)]) ax[1].plot(s2g.all_score) ax[0].set_xlim(0,len(s2g.all_score)) ax[1].set_xlim(0,len(s2g.all_score))<jupyter_output><empty_output><jupyter_text>### Visualization of a snippet<jupyter_code>fig,ax = plt.subplots(2,1,figsize=(20,4)) ax[0].plot(df[0].values[0:len(s2g.all_score)]) ax[1].plot(s2g.all_score) ax[0].set_xlim(27500,38000) ax[1].set_xlim(27500,38000)<jupyter_output><empty_output>
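To turn the score curve into concrete anomaly locations, one simple option is to pick the highest-scoring positions (a sketch assuming `s2g.all_score` behaves like a 1-D numpy array of per-position scores, as the plots above suggest):

```python
import numpy as np

scores = np.asarray(s2g.all_score)

# Indices of the 5 highest-scoring positions, i.e. the most anomalous subsequence starts
top_k = np.argsort(scores)[::-1][:5]
for idx in top_k:
    print(f"position {idx}: score {scores[idx]:.3f}")
```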
no_license
/SourceCode/example/Time2Graph_Demo_synthetic.ipynb
odedns/hackathon
9
<jupyter_start><jupyter_text># Modern Cryptography Generally speaking there are two *kinds* of encryption: symmetric and asymmetric. In symmetric encryption, the parties involved share the ***same*** key. In asymmetric encryption, the parties use ***different*** keys, that are mathematically ***related*** to each other. <jupyter_code>Image('/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/sym_vs_asym.png')<jupyter_output><empty_output><jupyter_text>## Symmetric Encryption In the following, we look at symmetric encryption algorithms. In symmetric crypto, we use the same key for encryption and decryption. **Therefore, the two parties need to establish a secret key between them.** Symmetric encryption can be up to 1000 times faster than asymmetric encryption. Given the support of some crypto algorithm in the CPU and at hardware level, even faster.## *Exercise* - How should we share this secret key? securely, of course!### Advanced Encryption Algorithm (AES) AES is based on Rijndael encryption algorithm, designed by Joan Daemen and Vincent Rijmen. It was one of the algorithms submitted to U.S. National Institute of Standards and Technology (NIST) to replace DES and 3DES. It was published in 1998 and accepted and standardized in 2001. * AES supports key sizes of 128/192/256 bits * Block size: 128 bit * It's iterative rather than Feistel cipher * Treats data in 4 groups of 4 bytes * Operates on an entire block in every round * Resistant against known attacks * Speed and code compactness on many CPUs * Rijndael block and key size vary between 128, 192, 256 * However, in AES block size in 128 * Number of rounds a function of key size * 128 bits 10 rounds * 192 bits 12 rounds * 256 bits 14 rounds * Today most implementations use the CPU support (Intel AES-NI)### Block cipher mode of operation To encrypt messages of arbitrary size with block ciphers, we use the following algorithms, called the modes of operation. They define how to encrypt each block of the plaintext to produce the corresponding cipher text block. Some of these are completely insecure (ECB) and should not be used. * Electronic Codebook (ECB) * Cipher Block Chaining (CBC) * Counter (CTR)### Electronic Codebook (ECB)<jupyter_code>Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/ECB_enc.png") Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/ECB_dec.png")<jupyter_output><empty_output><jupyter_text>### Cipher Block Chaining (CBC)<jupyter_code>Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/CBC_enc.png") Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/CBC_dec.png")<jupyter_output><empty_output><jupyter_text>### Counter (CTR)<jupyter_code>Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/CTR_enc.png") Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/CTR_dec.png") import os from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends import default_backend key = os.urandom(16) # in bytes, 128 bits iv = os.urandom(16) # ECB Mode, we only need a key ### *** DO NOT USE ECB. 
IT IS INSECURE *** ### cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend()) encryptor = cipher.encryptor() # note that we don't need padding here, since len("PyCon 2017 Cypto") = 16 cipher_text = encryptor.update(b"PyCon 2017 Cypto") + encryptor.finalize() cipher_text print (len(cipher_text)) decryptor = cipher.decryptor() decryptor.update(cipher_text) + decryptor.finalize() # CBC Mode, we also need an IV cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) encryptor = cipher.encryptor() # note that we don't need padding here, since len("PyCon 2017 Cypto") = 16 cipher_text = encryptor.update(b"PyCon 2017 Cypto") + encryptor.finalize() cipher_text decryptor = cipher.decryptor() decryptor.update(cipher_text) + decryptor.finalize() # CTR Mode, we don't need padding in CTR mode. In transforms a block cipher into a stream cipher # we only need to introduce the nonce cipher = Cipher(algorithms.AES(key), modes.CTR(os.urandom(16)), backend=default_backend()) encryptor = cipher.encryptor() # len(b"PyCon 2017 Cypto!!") = 18, however no padding is needed. cipher_text = encryptor.update(b"PyCon 2017 Cypto!!") + encryptor.finalize()<jupyter_output><empty_output><jupyter_text>## *Exercise* - Encrypt the file following text using the ECB, and CBC or CTR mode and compare the results.<jupyter_code>plain_text = b"PyCon is great!!" * 128 def print_text(text, b64=False): for i in range(0, 128, 16): if b64: pt = base64.b64encode(text[i:i+16]) else: pt = text[i:i+16] print (pt) ## IMPLEMENTATION # ECB cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend()) encryptor = cipher.encryptor() cipher_text = encryptor.update(plain_text) + encryptor.finalize() print_text(cipher_text) # CBC cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) encryptor = cipher.encryptor() cipher_text = encryptor.update(plain_text) + encryptor.finalize() print_text(cipher_text) # CTR cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend()) encryptor = cipher.encryptor() cipher_text = encryptor.update(plain_text) + encryptor.finalize() print_text(cipher_text)<jupyter_output>b'\xf9\x80X\xa1w\xfa`\xc7\xb84\xec\xc8\\6\xe3F' b'g\x89\xe6\xe3\x1fT5\x11\x8c\x9b\xeas\xa0Z!\x0f' b'\x86?\xc0\xfe\x98\x13\xf4-\xf0\xcb*^z\xf0\xd0\xca' b'm;;\x10\xeb\xc7\x04f\xab\xa9\x0c\x98\x7f\xf3\x11\xdf' b'\xc2\xb2S.Zm\xa9\x04\xc7\xc0+|\x88\xdb\xdfx' b'\x15\x00\x94\xc8\x90\x99-\xfc5\x86\xc9DIs\x8d\x8c' b'\x99\x92R\xf8\x98L\xa5\xeb\xa1\th\xba\x8b\xc4\x99\x8e' b'\xd4\x9d4\xbb\x1b\xc4\xf0\xc2\x17\xc9#\xda#w\xe0\xd7' <jupyter_text>## *Extra Activity* Encrypt the file "include/tux.png" using the ECB, and CBC or CTR mode and compare the results. - **You need to install the [pillow](https://python-pillow.org/) library** - read the image file and get all the pixel values - encrypt the pixel values - create a new image with new (encrypted) pixel values - write your newly crafted image to diskTo install Pillow, simply use pip or conda: ```bash pip install Pillow conda install pillow ```<jupyter_code># original Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/tux.png")<jupyter_output><empty_output><jupyter_text> ECB Encryption of the image with two different keys. The results (colors) are different, because we are using two different keys. However, the patterns inside the data (image) is not hidden. 
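Before looking at the rendered results below, here is one possible way to produce them (a rough sketch under assumptions: it zero-pads the raw RGB bytes, encrypts them with ECB, and writes a new image of the same size; the relative path to tux.png is assumed):

```python
import os
from PIL import Image as PILImage
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend

key = os.urandom(16)

# Load the image and grab its raw RGB bytes (path assumed relative to the notebook)
img = PILImage.open("include/tux.png").convert("RGB")
raw = img.tobytes()

# Zero-pad up to a multiple of the 16-byte AES block size
padded = raw + b"\x00" * (-len(raw) % 16)

# Encrypt the pixel bytes with ECB -- intentionally insecure, to expose the pattern leakage
encryptor = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend()).encryptor()
enc = encryptor.update(padded) + encryptor.finalize()

# Rebuild an image of the same size from the encrypted bytes and write it to disk
PILImage.frombytes("RGB", img.size, enc[:len(raw)]).save("tux_ecb.png")
```

Swapping `modes.ECB()` for `modes.CBC(os.urandom(16))` produces the noise-like picture discussed further down.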
<jupyter_code>Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/ECB2.png") Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/ECB1.png")<jupyter_output><empty_output><jupyter_text> As compared to when we are using the CBC (or CTR) mode. Because we introduce the randomness at the beginning (IV), and we carry this randomness (noise) throughout the encryption the patterns are diminished. <jupyter_code>Image("/home/atrides/Desktop/Cryptography_Blockchain_101/Cryptography/pycon2017_crypto_tutorial/include/CBC1.png")<jupyter_output><empty_output><jupyter_text>## Encryption alone is not good enough Encrypting your data alone will not protect you from data tampering, meaning an adversary can change the results of your decryption without having access to the key -- all without you noticing. That's where HMACs discussed previously become handy.### Bit flipping attackSince the IV is sent in clear we can change the IV value and change the corresponding plaintext, when using CBC mode. ***Encryption*** - $C_{i}=E_{K}(P_{i}\oplus C_{i-1})$ - $ C_{0}=IV$ ***Decryption*** - $P_{i}=D_{K}(C_{i})\oplus C_{i-1}$ - $C_{0}=IV$ Therefore to change the plaintext value we just need to xor the old plaintext(p), and the new value (t), with the IV: $IV = IV \oplus p \oplus t$ Meaning if the first 4 bytes of the plaint text are: "1234" and we want to change it to "6789" all we have to do is $IV[0:4] = IV[0:4] \oplus 1234 \oplus 6789$## *Exercise* - Imagine the message is only 16 bytes, "PyCon2017 Crypto". Change the year from 2017 to 1991.<jupyter_code>def xor(s1, s2): return bytes([a ^ b for a,b in zip(s1,s2)]) iv = os.urandom(16) cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) encryptor = cipher.encryptor() cipher_text = encryptor.update(b"PyCon2017 Crypto") + encryptor.finalize() cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) decryptor = cipher.decryptor() decryptor.update(cipher_text) + decryptor.finalize() ## IMPLEMENTATION<jupyter_output><empty_output><jupyter_text>## Authenticated Encryption with Associated Data (AEAD) AEAD provides confidentiality, integrity, and authenticity at once. Such schemes help to mitigate against the bit flipping attacks that we just did. The Galois/Counter Mode (GCM) mode of operation is the recommended schemes to be used. Fortunately, the *cryptography* library already has it implemented.<jupyter_code># GCM Mode, we also need an IV cipher = Cipher(algorithms.AES(key), modes.GCM(iv), backend=default_backend()) encryptor = cipher.encryptor() # note that we don't need padding here, since len("PyCon2017 Crypto") = 16 encryptor.authenticate_additional_data(b"SOME ADDITIONAL DATA") cipher_text = encryptor.update(b"Snehil") + encryptor.finalize() tag = encryptor.tag decryptor = Cipher(algorithms.AES(key), modes.GCM(iv,tag), backend=default_backend()).decryptor() decryptor.authenticate_additional_data(b"SOME ADDITIONAL DATA") decryptor.update(cipher_text) + decryptor.finalize()<jupyter_output><empty_output><jupyter_text>## Padding With some block cipher mode of operations (e.g., CBC) we need to pad the data to the block size. 
Otherwise, it would throw an exception.<jupyter_code>import os from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends import default_backend key = os.urandom(16) # in bytes, 128 bits #CTR cipher = Cipher(algorithms.AES(key), modes.CTR(os.urandom(16)), backend=default_backend()) encryptor = cipher.encryptor() # len("PyCon 2017 Cryptography") = 23, but we don't need padding in CTR ctr_ct = encryptor.update(b"PyCon 2017 Cryptography") + encryptor.finalize() #CBC cipher = Cipher(algorithms.AES(key), modes.CBC(os.urandom(16)), backend=default_backend()) encryptor = cipher.encryptor() # len("PyCon 2017 Cryptography") = 23, throws an exception cbc_ct = encryptor.update(b"PyCon 2017 Cryptography") + encryptor.finalize()<jupyter_output><empty_output><jupyter_text>### Public-Key Cryptography Standards (PKCS) PKCS7 padding is described in RFC 5652. The number of bytes (n) missing to complete the block size is repeated n times. - For example, if the block size is 16: - If the data is of size 13, the data is padded with 3, 3 times: 03 03 03. - If the data is of size 14, the data is padded with 2, 2 times: 02 02 <jupyter_code># padding from cryptography.hazmat.primitives import padding msg = b"Snehil" print("msg: ", msg) padder = padding.PKCS7(128).padder() padded_data = padder.update(msg) padded_data += padder.finalize() print("padded data: " ,padded_data)<jupyter_output>msg: b'Snehil' padded data: b'Snehil\n\n\n\n\n\n\n\n\n\n'
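For completeness, the receiving side strips the padding the same way after decryption (a short continuation using the `padded_data` value just produced):

```python
# Reverse the PKCS7 padding to recover the original message
unpadder = padding.PKCS7(128).unpadder()
original = unpadder.update(padded_data) + unpadder.finalize()
print("unpadded:", original)
```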
permissive
/01_cryptographyWithPython/02.symmetricEncryption/02. Symmetric Encryption.ipynb
snehilk1312/Cryptography101
12
<jupyter_start><jupyter_text># Laptop popularity factors by performance - Search for laptops on 'Danawa' and read elements such as product name, price and specs into a DataFrame - Rank the laptops by performance and check how closely that matches the actual popularity ranking - Visualization is carried out along the way<jupyter_code># Danawa laptop search, page 1 url_1page = '''http://search.danawa.com/dsearch.php?query=%EB%85%B8%ED%8A%B8%EB%B6%81&tab=main''' # Danawa laptop search, page 2 url_2page = '''http://search.danawa.com/dsearch.php? query={}&originalQuery={}&volumeType=allvs& page={}&limit=30&sort=saveDESC&list=list&boost=true&addDelivery=N&tab=goods''' # Danawa laptop search, page 3 url_3page = '''http://search.danawa.com/dsearch.php? query{}&originalQuery={}& previousKeyword={}&volumeType=allvs& page=3&limit=30&sort=saveDESC&list=list&boost=true&addDelivery=N&tab=goods&tab=goods''' '''http://search.danawa.com/dsearch.php?query={}&volumeType=allvs &page={}&limit=30&sort=saveDESC&list=list&boost=true&addDelivery=N&tab=goods''' # In this url you just need to set the php query and page variables to whatever you want. # Function that searches for the desired product on the Danawa site def searching(product,page=1): try: if page==1: url = 'http://search.danawa.com/dsearch.php?k1=%EB%85%B8%ED%8A%B8%EB%B6%81&module=goods&act=dispMain' else: url = '''http://search.danawa.com/dsearch.php?query={}&volumeType=allvs &page={}&limit=30&sort=saveDESC&list=list&boost=true&addDelivery=N&tab=goods'''.format(product,page) return url except: print("Check that product was passed as a string and page as a number.") url = searching('노트북',1) from selenium import webdriver driver = webdriver.Chrome('C:/Users/hp/Desktop/최영조/chromedriver.exe') driver.get(url) html = driver.page_source from bs4 import BeautifulSoup soup = BeautifulSoup(html,'html.parser') full_info = soup.select('div.prod_main_info') first_info = full_info[0] first_info name = first_info.select('p>a')[0].text name spec = first_info.select('div.spec_list')[0].text.replace("\t","").replace("\n","") spec price = int(first_info.select('p.price_sect strong')[0].text.replace(",","")) price # Several prices are often displayed for one product; we only consider the first price, which covers the base option. full_info[4].select('p.price_sect strong') searching('노트북',2) import pandas as pd import time from tqdm import tqdm_notebook # Check that pages other than page 1 also work correctly. url = searching('노트북',2) driver.get(url) time.sleep(2) information=[] html = driver.page_source soup = BeautifulSoup(html,'html.parser') full_info = soup.select('div.prod_main_info') time.sleep(1) for i in tqdm_notebook(range(30)): item_info = full_info[i] name = item_info.select('p>a')[0].text spec = item_info.select('div.spec_list')[0].text.replace("\t","").replace("\n","") price = int(item_info.select('p.price_sect strong')[0].text.replace(",","")) information.append([name,spec,price]) information # Check that looping over every page works without problems. information=[] from tqdm import tqdm_notebook import pandas as pd import time product='노트북' total_page=10 driver = webdriver.Chrome('C:/Users/hp/Desktop/최영조/chromedriver.exe') for k in tqdm_notebook(range(1,total_page+1)): url = searching(product,k) driver.get(url) time.sleep(2) html = driver.page_source soup = BeautifulSoup(html,'html.parser') full_info = soup.select('div.prod_main_info') time.sleep(1) if k == 1: l=32 else: l=30 for i in range(l): item_info = full_info[i] name = item_info.select('p>a')[0].text spec = item_info.select('div.spec_list')[0].text.replace("\t","").replace("\n","") # A previous run failed because sold-out products show no price; this try/except guards against that error try: price = int(item_info.select('p.price_sect strong')[0].text.replace(",","")) except: price= None information.append([name,spec,price]) info2_df = pd.DataFrame(information,columns=['제품명','스펙','가격']) info2_df.head() # If this works correctly, wrap it in a function and use that. 
def get_information(product,total_page): information=[] from tqdm import tqdm_notebook import pandas as pd import time driver = webdriver.Chrome('C:/Users/hp/Desktop/최영조/chromedriver.exe') for k in tqdm_notebook(range(1,total_page+1)): url = searching(product,k) driver.get(url) time.sleep(2) html = driver.page_source soup = BeautifulSoup(html,'html.parser') full_info = soup.select('div.prod_main_info') time.sleep(1) if k == 1: l=32 else: l=30 for i in range(l): item_info = full_info[i] name = item_info.select('p>a')[0].text spec = item_info.select('div.spec_list')[0].text.replace("\t","").replace("\n","") try: price = int(item_info.select('p.price_sect strong')[0].text.replace(",","")) except: price= None information.append([name,spec,price]) info_df = pd.DataFrame(information,columns=['제품명','스펙','가격']) return info_df result = get_information('노트북',10) result.head() result.isnull().sum() # There is a single missing value in the price column # Save the successfully collected result to an Excel file result.to_excel('notebook.xlsx')<jupyter_output><empty_output><jupyter_text>## Preprocessing the crawled data<jupyter_code>import pandas as pd import matplotlib.pyplot as plt import seaborn as sns result = pd.read_excel('notebook.xlsx') result.head()<jupyter_output><empty_output><jupyter_text>### The spec column is a messy string, so let's extract only the performance details we need. (Since I don't know laptops well, the performance notes below rely heavily on information found online.) - CPU: Celeron -> Pentium -> i3 -> i5, and a newer generation is better (9th gen < 10th gen)<jupyter_code>result.스펙[0] # CPU : Core i7, 10th gen # Weight : 1.35kg # SSD : 256GB # Battery : 80kw # OS not included # Fingerprint recognition<jupyter_output><empty_output>
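As a first step toward that extraction, a rough sketch of pulling a couple of fields out of the spec string (the regular expressions are assumptions based on the sample spec above and will need tuning against the real strings; 제품명/스펙/가격 are the product-name/spec/price columns created by the crawler):

```python
import re

# Naive first pass: pull the weight (e.g. "1.35kg") out of the spec string
# and flag whether an SSD is mentioned at all
result['weight_kg'] = result['스펙'].str.extract(r'([\d.]+)\s*kg', flags=re.IGNORECASE, expand=False).astype(float)
result['has_ssd'] = result['스펙'].str.contains('SSD', case=False, na=False)
result[['제품명', 'weight_kg', 'has_ssd', '가격']].head()
```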
no_license
/exercise/.ipynb_checkpoints/(2020.04.08)notebook(unfinished)-checkpoint.ipynb
Young-Jo-Choi/data_analysis
3
<jupyter_start><jupyter_text>## Hi, I'm Patricio Oria and this Jupyter Notebook will be used for the Coursera Capstone Project<jupyter_code>import pandas as pd import numpy as np print("Hello Capstone Project Course!")<jupyter_output>Hello Capstone Project Course!
no_license
/Capstone_Project.ipynb
PatricioOria/Coursera_Capstone
1
<jupyter_start><jupyter_text>Since we have '?' in our features, lets treat them first and then we can convert the other features to float.<jupyter_code>df.columns cols = ['X0', 'Y0', 'Z0', 'X1', 'Y1', 'Z1', 'X2', 'Y2', 'Z2', 'X3', 'Y3', 'Z3', 'X4', 'Y4', 'Z4', 'X5', 'Y5', 'Z5', 'X6', 'Y6', 'Z6', 'X7', 'Y7', 'Z7', 'X8', 'Y8', 'Z8', 'X9', 'Y9', 'Z9', 'X10', 'Y10', 'Z10', 'X11', 'Y11', 'Z11'] df[cols] = df[cols].replace({'?':np.NaN}) df.head() df [ "X3" ] = df [ "X3" ].astype (float) df [ "Y3" ] = df [ "Y3" ].astype (float) df [ "Z3" ] = df [ "Z3" ].astype (float) df [ "X4" ] = df [ "X4" ].astype (float) df [ "Y4" ] = df [ "Y4" ].astype (float) df [ "Z4" ] = df [ "Z4" ].astype (float) df [ "X5" ] = df [ "X5" ].astype (float) df [ "Y5" ] = df [ "Y5" ].astype (float) df [ "Z5" ] = df [ "Z5" ].astype (float) df [ "X6" ] = df [ "X6" ].astype (float) df [ "Y6" ] = df [ "Y6" ].astype (float) df [ "Z6" ] = df [ "Z6" ].astype (float) df.info() df['Class'].value_counts() df['Class'].value_counts().plot.bar() plt.show() df.describe().T<jupyter_output><empty_output><jupyter_text>## Checking for null<jupyter_code>df.isnull().sum() def fun(df): null=df.isnull().sum() perc=(null/(df.shape[0]))*100 d=pd.concat([null,perc],axis=1,keys=['null count','percentage']) return(d) fun(df)<jupyter_output><empty_output><jupyter_text>We can see that we have few columns that has more than 50% missing values. It is better to remove them.<jupyter_code>df.drop(['X7','Y7','Z7','X8','Y8','Z8','X9','Y9','Z9','X10','Y10','Z10','X11','Y11','Z11'], axis=1, inplace=True) df.head() df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 78096 entries, 0 to 78095 Data columns (total 23 columns): Class 78096 non-null object User 78096 non-null object X0 78096 non-null float64 Y0 78096 non-null float64 Z0 78096 non-null float64 X1 78096 non-null float64 Y1 78096 non-null float64 Z1 78096 non-null float64 X2 78096 non-null float64 Y2 78096 non-null float64 Z2 78096 non-null float64 X3 77406 non-null float64 Y3 77406 non-null float64 Z3 77406 non-null float64 X4 74976 non-null float64 Y4 74976 non-null float64 Z4 74976 non-null float64 X5 65073 non-null float64 Y5 65073 non-null float64 Z5 65073 non-null float64 X6 52248 non-null float64 Y6 52248 non-null float64 Z6 52248 non-null float64 dtypes: float64(21), object(2) memory usage: 13.7+ MB <jupyter_text>## Treating missing values with mean<jupyter_code>df=df.fillna(method = 'bfill') df.isnull().sum() df.info() df.head() categorical = list ( df.select_dtypes ( include = "object" ).columns ) print ( "Categorical features:\n" , categorical ) print ( "\n" ) numerical = list ( df.select_dtypes ( exclude = "object" ).columns ) print ( "Continous features:\n" , numerical ) for i in categorical: print ( "Countplot for feature:" , i ) sns.countplot ( df [ i ] ) plt.show ( ) for i in numerical: print ( "Boxplot for feature:" , i ) sns.boxplot ( df [ i ] ) plt.show ( ) for i in numerical: print ( "distplot for feature:" , i ) print ( "Skewness for" , i , "is:" , df [ i ].skew ( ) ) sns.distplot ( df [ i ] ) plt.show ( ) f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.kdeplot(df['X0'], color="b", ax=axes[0, 0]) sns.kdeplot(df['Y0'], color="b", ax=axes[0, 1]) sns.kdeplot(df['Z0'], color="b", ax=axes[0, 2]) sns.kdeplot(df['X1'], color="b", ax=axes[1, 0]) sns.kdeplot(df['Y1'], color="b", ax=axes[1, 1]) sns.kdeplot(df['Z1'], color="b", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.kdeplot(df['X2'], color="b", ax=axes[0, 0]) sns.kdeplot(df['Y2'], 
color="b", ax=axes[0, 1]) sns.kdeplot(df['Z2'], color="b", ax=axes[0, 2]) sns.kdeplot(df['X3'], color="b", ax=axes[1, 0]) sns.kdeplot(df['Y3'], color="b", ax=axes[1, 1]) sns.kdeplot(df['Z3'], color="b", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.kdeplot(df['X4'], color="b", ax=axes[0, 0]) sns.kdeplot(df['Y4'], color="b", ax=axes[0, 1]) sns.kdeplot(df['Z4'], color="b", ax=axes[0, 2]) sns.kdeplot(df['X5'], color="b", ax=axes[1, 0]) sns.kdeplot(df['Y5'], color="b", ax=axes[1, 1]) sns.kdeplot(df['Z5'], color="b", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.kdeplot(df['X6'], color="b", ax=axes[0, 0]) sns.kdeplot(df['Y6'], color="b", ax=axes[0, 1]) sns.kdeplot(df['Z6'], color="b", ax=axes[0, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.boxplot(df['X0'], color="c", ax=axes[0, 0]) sns.boxplot(df['Y0'], color="c", ax=axes[0, 1]) sns.boxplot(df['Z0'], color="c", ax=axes[0, 2]) sns.boxplot(df['X1'], color="c", ax=axes[1, 0]) sns.boxplot(df['Y1'], color="c", ax=axes[1, 1]) sns.boxplot(df['Z1'], color="c", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.boxplot(df['X2'], color="c", ax=axes[0, 0]) sns.boxplot(df['Y2'], color="c", ax=axes[0, 1]) sns.boxplot(df['Z2'], color="c", ax=axes[0, 2]) sns.boxplot(df['X3'], color="c", ax=axes[1, 0]) sns.boxplot(df['Y3'], color="c", ax=axes[1, 1]) sns.boxplot(df['Z3'], color="c", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.boxplot(df['X4'], color="c", ax=axes[0, 0]) sns.boxplot(df['Y4'], color="c", ax=axes[0, 1]) sns.boxplot(df['Z4'], color="c", ax=axes[0, 2]) sns.boxplot(df['X5'], color="c", ax=axes[1, 0]) sns.boxplot(df['Y5'], color="c", ax=axes[1, 1]) sns.boxplot(df['Z5'], color="c", ax=axes[1, 2]) plt.show() f, axes = plt.subplots(2, 3, figsize=(15, 10)) sns.boxplot(df['X6'], color="c", ax=axes[0, 0]) sns.boxplot(df['Y6'], color="c", ax=axes[0, 1]) sns.boxplot(df['Z6'], color="c", ax=axes[0, 2]) plt.show()<jupyter_output><empty_output><jupyter_text>## Dropping class 0<jupyter_code>df.drop(df[df['Class'] == '0'].index,axis=0, inplace = True) df.head() df['Class'].value_counts() df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'> Int64Index: 78095 entries, 1 to 78095 Data columns (total 23 columns): Class 78095 non-null object User 78095 non-null object X0 78095 non-null float64 Y0 78095 non-null float64 Z0 78095 non-null float64 X1 78095 non-null float64 Y1 78095 non-null float64 Z1 78095 non-null float64 X2 78095 non-null float64 Y2 78095 non-null float64 Z2 78095 non-null float64 X3 78095 non-null float64 Y3 78095 non-null float64 Z3 78095 non-null float64 X4 78095 non-null float64 Y4 78095 non-null float64 Z4 78095 non-null float64 X5 78095 non-null float64 Y5 78095 non-null float64 Z5 78095 non-null float64 X6 78095 non-null float64 Y6 78095 non-null float64 Z6 78095 non-null float64 dtypes: float64(21), object(2) memory usage: 14.3+ MB <jupyter_text>To find the correlation we convert our target variable to integer and check the correlation using pairplot<jupyter_code># Converting target variable to categorrical. 
df [ "Class" ] = df [ "Class" ].astype ( int ) f , ax = plt.subplots ( figsize = ( 20 , 10 ) ) sns.heatmap ( df.corr ( ) , cmap = "YlGnBu" , annot = True ) plt.show ( ) target = df.corr() target = target [ "Class" ] print ( target )<jupyter_output>Class 1.000000 X0 0.080171 Y0 0.081755 Z0 0.051857 X1 -0.010569 Y1 0.144086 Z1 0.089920 X2 -0.052392 Y2 0.169251 Z2 0.095538 X3 -0.055502 Y3 0.174244 Z3 0.090655 X4 -0.055276 Y4 0.166296 Z4 0.064592 X5 -0.028390 Y5 0.132509 Z5 0.004403 X6 0.155534 Y6 -0.021696 Z6 -0.218600 Name: Class, dtype: float64 <jupyter_text>Dropping the user feature<jupyter_code>df = df.drop ( "User", axis = 1 ) df.head()<jupyter_output><empty_output><jupyter_text># Classification Models## Splitting Data into train and test#<jupyter_code>from sklearn.model_selection import train_test_split X = df.drop('Class', axis=1) y = df['Class'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape)<jupyter_output>(54666, 21) (54666,) (23429, 21) (23429,) <jupyter_text># Decision Tree<jupyter_code>from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier(max_depth=3, min_samples_leaf=10 ) dt.fit(X, y) !pip install pydotplus !pip install graphviz import os os.environ['PATH'] = os.environ['PATH']+';'+os.environ['CONDA_PREFIX']+r"\Library\bin\graphviz" from IPython.display import Image from sklearn.externals.six import StringIO from sklearn.tree import export_graphviz import pydotplus features = X.columns dot_data = export_graphviz(dt, out_file=None, feature_names=features) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png()) from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score , classification_report , confusion_matrix , roc_auc_score dt = DecisionTreeClassifier() dt.fit(X_train, y_train) y_pred_train = dt.predict(X_train) y_prob_train = dt.predict_proba(X_train)[:,1] y_pred = dt.predict(X_test) y_prob = dt.predict_proba(X_test)[:,1] from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve print('Accuracy of Decision Tree-Train: ', accuracy_score(y_pred_train, y_train)) print('Accuracy of Decision Tree-Test: ', accuracy_score(y_pred, y_test)) print (classification_report (y_pred, y_test))<jupyter_output> precision recall f1-score support 1 0.99 0.98 0.99 4988 2 0.87 0.88 0.88 4382 3 0.92 0.91 0.91 4946 4 0.88 0.86 0.87 4502 5 0.89 0.91 0.90 4611 accuracy 0.91 23429 macro avg 0.91 0.91 0.91 23429 weighted avg 0.91 0.91 0.91 23429 <jupyter_text># Hyper Tuning <jupyter_code>from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.metrics import accuracy_score , classification_report , confusion_matrix , roc_auc_score from scipy.stats import randint as sp_randint dtc = DecisionTreeClassifier(random_state=1) params = {'max_depth' : sp_randint(2,10), 'min_samples_split': sp_randint(2,50), 'min_samples_leaf': sp_randint(1,20), 'criterion':['gini', 'entropy']} rand_search = RandomizedSearchCV(dtc, param_distributions=params, cv=3,random_state=1) rand_search.fit(X, y) print(rand_search.best_params_) dtc = DecisionTreeClassifier(**rand_search.best_params_) dtc.fit(X_train, y_train) y_pred_train = dtc.predict(X_train) y_prob_train = dtc.predict_proba(X_train)[:,1] y_pred = dtc.predict(X_test) y_prob = dtc.predict_proba(X_test)[:,1] from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve print('Accuracy of 
Decision Tree-Train: ', accuracy_score(y_pred_train, y_train)) print('Accuracy of Decision Tree-Test: ', accuracy_score(y_pred, y_test)) print (classification_report (y_pred, y_test))<jupyter_output> precision recall f1-score support 1 0.95 0.98 0.97 4794 2 0.79 0.78 0.78 4514 3 0.78 0.81 0.79 4748 4 0.75 0.70 0.72 4731 5 0.77 0.78 0.78 4642 accuracy 0.81 23429 macro avg 0.81 0.81 0.81 23429 weighted avg 0.81 0.81 0.81 23429 <jupyter_text># Random Forest<jupyter_code>from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=10, random_state=1) rfc.fit(X_train, y_train) y_pred_train = rfc.predict(X_train) y_prob_train = rfc.predict_proba(X_train)[:,1] y_pred = rfc.predict(X_test) y_prob = rfc.predict_proba(X_test)[:,1] from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve print('Accuracy of Random Forest-Train: ', accuracy_score(y_pred_train, y_train)) print('Accuracy of Random Forest-Test: ', accuracy_score(y_pred, y_test)) print(classification_report (y_pred, y_test))<jupyter_output> precision recall f1-score support 1 1.00 1.00 1.00 4940 2 0.94 0.92 0.93 4551 3 0.96 0.96 0.96 4947 4 0.93 0.95 0.94 4309 5 0.95 0.95 0.95 4682 accuracy 0.96 23429 macro avg 0.95 0.96 0.96 23429 weighted avg 0.96 0.96 0.96 23429 <jupyter_text>## Hyper Tuning<jupyter_code>from scipy.stats import randint as sp_randint from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.metrics import accuracy_score , classification_report , confusion_matrix , roc_auc_score rfc = RandomForestClassifier(random_state=1) params = {'n_estimators': sp_randint(5,25), 'criterion': ['gini', 'entropy'], 'max_depth': sp_randint(2, 10), 'min_samples_split': sp_randint(2,20), 'min_samples_leaf': sp_randint(1, 20), 'max_features': sp_randint(2,15)} rand_search_rfc = RandomizedSearchCV(rfc, param_distributions=params,cv=3, random_state=1) rand_search_rfc.fit(X, y) print(rand_search_rfc.best_params_) rfc = RandomForestClassifier(**rand_search_rfc.best_params_) rfc.fit(X_train, y_train) y_pred_train = rfc.predict(X_train) y_prob_train = rfc.predict_proba(X_train)[:,1] y_pred = rfc.predict(X_test) y_prob = rfc.predict_proba(X_test)[:,1] from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve print('Accuracy of Random Forest-Train: ', accuracy_score(y_pred_train, y_train)) print('Accuracy of Random Forest-Test: ', accuracy_score(y_pred, y_test)) print(classification_report (y_pred, y_test))<jupyter_output> precision recall f1-score support 1 0.99 0.98 0.99 4982 2 0.87 0.80 0.83 4887 3 0.90 0.91 0.90 4796 4 0.83 0.88 0.85 4155 5 0.87 0.88 0.87 4609 accuracy 0.89 23429 macro avg 0.89 0.89 0.89 23429 weighted avg 0.89 0.89 0.89 23429
no_license
/Motion Capture Hand Posture/Motion Capture Hand Posture.ipynb
AakashRaj15/Machine-Learning-Projects
12
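A side note on the EDA cells in the motion-capture notebook above: the KDE and box-plot grids are built from dozens of near-identical calls. Below is a compact, behavior-equivalent sketch (editor's addition, not part of the original notebook); it assumes the notebook's `df` with sensor columns `X0`..`Z6` is in scope, and `plot_grid` is a hypothetical helper name.

```python
import matplotlib.pyplot as plt
import seaborn as sns

# Same column order as the notebook: X0, Y0, Z0, X1, ..., Z6 (21 columns).
cols = [f"{axis}{i}" for i in range(7) for axis in ("X", "Y", "Z")]

def plot_grid(plot_fn, columns, color):
    # Draw six columns at a time on a 2x3 grid, as the notebook does.
    for start in range(0, len(columns), 6):
        f, axes = plt.subplots(2, 3, figsize=(15, 10))
        for col, ax in zip(columns[start:start + 6], axes.ravel()):
            plot_fn(df[col], color=color, ax=ax)
        plt.show()

plot_grid(sns.kdeplot, cols, "b")   # same KDE grids as above
plot_grid(sns.boxplot, cols, "c")   # same box-plot grids as above
```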
<jupyter_start><jupyter_text>**Question 2** Experiment with 5 of the Dictionary's Built-In Functions <jupyter_code>dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit.get("Name") dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit["Place"]="Dombivali" dit dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit.popitem() dit dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit.pop("Place") dit dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit1=dit.copy() print("dit:", dit) print("dit1:", dit1) dit={"Name":"Akshay", "Place":"Kalyan","District":"Thane"} dit.values()<jupyter_output><empty_output>
no_license
/Day1/Assignment2.ipynb
Adrija08/Assignment
1
<jupyter_start><jupyter_text># Pivot Tables <jupyter_code>import pandas as pd import seaborn as sns titanic = sns.load_dataset("titanic") titanic.head() titanic.groupby("sex")["survived"].mean() titanic.groupby(["sex","class"])[["survived"]].aggregate("mean").unstack() # the same table with pivot_table titanic.pivot_table("survived", index="sex", columns="class") titanic.age.head() age = pd.cut(titanic["age"], [0,18,90]) age.head(10)<jupyter_output><empty_output><jupyter_text>**We turn the age variable into a categorical variable by splitting it into the 0-18 and 18-90 age ranges.**<jupyter_code>titanic.pivot_table("survived",["sex",age],"class")<jupyter_output><empty_output>
no_license
/pivot_tablo.ipynb
ibrahimkocabas/Pandas
2
<jupyter_start><jupyter_text>## Dummy variables<jupyter_code>dummy_sex = pd.get_dummies(df3['sex'], prefix='sex') dummy_sex.head() df4=pd.concat([df, dummy_sex], axis=1) df4<jupyter_output><empty_output>
no_license
/01-DataWrangling-RegLineal/.ipynb_checkpoints/T1 - 2 - analisis preliminar-checkpoint.ipynb
damianml/udemy-ml
1
<jupyter_start><jupyter_text>## Math How-Tos<jupyter_code>from sympy.functions.combinatorial.numbers import stirling, bell from sympy import binomial from sympy.utilities.iterables import partitions from sympy.combinatorics.partitions import IntegerPartition k = 3 n = 26 stirling(n, k) binomial(3, 2) [p.copy() for p in partitions(6)] for p in partitions(6, m=3): print(p) for p in partitions(12, m=3): print(p) binomial(22, 2) pow(1/2, 23) * 1/3<jupyter_output><empty_output>
no_license
/.ipynb_checkpoints/extras-checkpoint.ipynb
majk12/pbv
1
<jupyter_start><jupyter_text>#### Let's look at the dataset<jupyter_code># Get the list of files in the directories np_files_0_train = glob.glob("dataset/train/0/*.npy") np_files_0_val = glob.glob("dataset/val/0/*.npy") np_files_1_train = glob.glob("dataset/train/1/*.npy") np_files_1_val = glob.glob("dataset/val/1/*.npy") # Sorting np_files_0_train.sort() np_files_0_val.sort() np_files_1_train.sort() np_files_1_val.sort() # Initialize empty DataFrames df_0_train = pd.DataFrame() df_0_val = pd.DataFrame() df_1_train = pd.DataFrame() df_1_val = pd.DataFrame() # Helper lists for bookkeeping len_0_train = [] len_0_val = [] len_1_train = [] len_1_val = [] # Write into the DataFrames and check the dimensionality (series length) for i, npfile in enumerate(np_files_0_train): load_data = np.load(npfile) len_0_train.append(len(load_data)) df_0_train[i] = pd.Series(load_data[0:3653]) print("Length of time series in train 0 class: Max=", max(len_0_train), "Min=", min(len_0_train)) for i, npfile in enumerate(np_files_0_val): load_data = np.load(npfile) len_0_val.append(len(load_data)) df_0_val[i] = pd.Series(load_data[0:3653]) print("Length of time series in val 0 class: Max=", max(len_0_val), "Min=", min(len_0_val)) for i, npfile in enumerate(np_files_1_train): load_data = np.load(npfile) len_1_train.append(len(load_data)) df_1_train[i] = pd.Series(load_data[0:3653]) print("Length of time series in train 1 class: Max=", max(len_1_train),"Min=", min(len_1_train)) for i, npfile in enumerate(np_files_1_val): load_data = np.load(npfile) len_1_val.append(len(load_data)) df_1_val[i] = pd.Series(load_data[0:3653]) print("Length of time series in val 1 class: Max=", max(len_1_val), "Min=", min(len_1_val), "\n")<jupyter_output>Length of time series in train 0 class: Max= 3653 Min= 3653 Length of time series in val 0 class: Max= 3653 Min= 3653 Length of time series in train 1 class: Max= 3654 Min= 3653 Length of time series in val 1 class: Max= 3654 Min= 3653 <jupyter_text>Some of the series are one time step longer than the others; we could use padding (np.pad), or we could simply standardize the series by making them all a fixed length. Since the series are fairly "long" and differ by only one step, I will trim the excess down to 3653 time steps.<jupyter_code>len(df_0_train.columns) len(df_1_train.columns)<jupyter_output><empty_output><jupyter_text>#### The classes are not balanced, so we will have to restore the balance manually. 
### Let's look at the classes and try to find differences between them### Training dataset <jupyter_code># Build the plots fig, axes = plt.subplots(nrows=3, ncols=2) axes[0,0].set_title('First class train', fontsize=20) df_0_train.plot(ax=axes[0,0], y=randint(0, df_0_train.shape[1]), color='green', figsize=(25,10)) df_0_train.plot(ax=axes[1,0], y=randint(0, df_0_train.shape[1]), color='green', figsize=(25,10)) df_0_train.plot(ax=axes[2,0], y=randint(0, df_0_train.shape[1]), color='green', figsize=(25,10)) axes[0,1].set_title('Second class train', fontsize=20) df_1_train.plot(ax=axes[0,1], y=randint(0, df_1_train.shape[1]), color='blue', figsize=(25,10)) df_1_train.plot(ax=axes[1,1], y=randint(0, df_1_train.shape[1]), color='blue', figsize=(25,10)) df_1_train.plot(ax=axes[2,1], y=randint(0, df_1_train.shape[1]), color='blue', figsize=(25,10))<jupyter_output><empty_output><jupyter_text>### Test dataset<jupyter_code># Build the plots fig, axes = plt.subplots(nrows=3, ncols=2) axes[0,0].set_title('First class train', fontsize=20) # test = pd.Series(np.load('dataset/val/0/1aa955bd-f962-445f-a207-a1451991d2cd.npy')[0:3653]) # test.plot(ax=axes[0,0], y=randint(0, test.shape[0]), color='green', figsize=(25,10)) df_0_val.plot(ax=axes[0,0], y=randint(0, df_0_val.shape[1]), color='green', figsize=(25,10)) df_0_val.plot(ax=axes[1,0], y=randint(0, df_0_val.shape[1]), color='green', figsize=(25,10)) df_0_val.plot(ax=axes[2,0], y=randint(0, df_0_val.shape[1]), color='green', figsize=(25,10)) axes[0,1].set_title('Second class train', fontsize=20) df_1_val.plot(ax=axes[0,1], y=randint(0, df_1_val.shape[1]), color='blue', figsize=(25,10)) df_1_val.plot(ax=axes[1,1], y=randint(0, df_1_val.shape[1]), color='blue', figsize=(25,10)) df_1_val.plot(ax=axes[2,1], y=randint(0, df_1_val.shape[1]), color='blue', figsize=(25,10))<jupyter_output><empty_output>
no_license
/data_exploration.ipynb
anujmhr/CNN_time_series_classifier-Pytorch-
4
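The time-series notebook above points out that a few series are one time step longer than the rest and opts to truncate everything to 3653 steps rather than pad. As a rough illustration of the two options it mentions (truncation vs. np.pad), here is a minimal sketch; the helper name `to_fixed_length` and the example array are hypothetical, not from the notebook.

```python
import numpy as np

TARGET_LEN = 3653  # fixed length chosen in the notebook above

def to_fixed_length(series, target_len=TARGET_LEN):
    # Truncate a longer series, or zero-pad a shorter one on the right.
    if len(series) >= target_len:
        return series[:target_len]                         # what the notebook does
    return np.pad(series, (0, target_len - len(series)))   # the np.pad alternative

x = np.arange(3654, dtype=float)      # hypothetical series that is one step too long
print(to_fixed_length(x).shape)       # (3653,)
```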
<jupyter_start><jupyter_text>Strongly negative correlations are just as significant as positive ones; we therefore consider the absolute values and sort them in descending order <jupyter_code>print(abs(correlations).sort_values(ascending=False))<jupyter_output><empty_output><jupyter_text>We see that the strongest correlation involves LSTAT (the lower status of the population)# Multiple linear regression<jupyter_code>df.columns continuous_features = ['RM','DIS','TAX','PTRATIO'] discrete_features = ['CRIM','ZN','INDUS','CHAS','NOX','AGE','B','LSTAT']<jupyter_output><empty_output><jupyter_text>We drop the discrete features<jupyter_code>df1 = df.drop(discrete_features, axis=1) X = df1.drop(['MEDV'], axis=1) y = df1.MEDV X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1) from sklearn.linear_model import LinearRegression lm = LinearRegression() lm.fit(X_train, y_train) # training y_pred = lm.predict(X_test) # prediction on the test set plt.figure(figsize=(12,12)) plt.scatter(y_test, y_pred) plt.plot([y_test.min(),y_test.max()],[y_test.min(),y_test.max()], color='red', linewidth=3) plt.xlabel("Price") plt.ylabel("Predicted price") plt.title("Actual prices vs predictions") sns.distplot(y_test-y_pred) print(np.sqrt(mean_squared_error(y_test, y_pred))) scoreR2 = r2_score(y_test, y_pred) print(scoreR2) lm.score(X_test,y_test)<jupyter_output><empty_output><jupyter_text># Random forest regression<jupyter_code>X = df.drop(['MEDV'], axis=1) y = df.MEDV X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1) from sklearn import ensemble rf = ensemble.RandomForestRegressor() rf.fit(X_train, y_train) y_rf = rf.predict(X_test) print(rf.score(X_test,y_test)) plt.figure(figsize=(12,12)) plt.scatter(y_test, y_rf) plt.plot([y_test.min(),y_test.max()],[y_test.min(),y_test.max()], color='red', linewidth=3) plt.xlabel("Price") plt.ylabel("Predicted price") plt.title("Actual prices vs predictions") sns.distplot(y_test-y_rf) print(np.sqrt(mean_squared_error(y_test, y_rf))) rf.score(X_test,y_test)<jupyter_output><empty_output><jupyter_text># XGBOOST <jupyter_code>import xgboost as XGB xgb = XGB.XGBRegressor() xgb.fit(X_train, y_train) y_xgb = xgb.predict(X_test) print(xgb.score(X_test,y_test)) plt.figure(figsize=(12,12)) plt.scatter(y_test, y_xgb) plt.plot([y_test.min(),y_test.max()],[y_test.min(),y_test.max()], color='red', linewidth=3) plt.xlabel("Price") plt.ylabel("Predicted price") plt.title("Actual prices vs predictions")<jupyter_output><empty_output>
no_license
/notebooks/loiccoquerelle/boston-house-prices.ipynb
Sayem-Mohammad-Imtiaz/kaggle-notebooks
5
<jupyter_start><jupyter_text>### Please note that the solution is running with Python3.<jupyter_code>import pandas as pd import matplotlib.pyplot as plt import seaborn import re<jupyter_output><empty_output><jupyter_text>#### Import the data and get a high-level picture<jupyter_code>df = pd.read_csv('sales.csv') df.head() df.shape df.dtypes<jupyter_output><empty_output><jupyter_text>#### TODO: Fix column datatypes Change ordered_at to datetime Change price and line_total to float<jupyter_code>df['ordered_at'] = pd.to_datetime(df['ordered_at']) for column in ['price', 'line_total']: df[column] = df[column].apply(lambda x: float(x[1:])) df.dtypes<jupyter_output><empty_output><jupyter_text>#### TODO: drop if duplicated or null<jupyter_code>df[df.duplicated()].shape[0] df = df.drop_duplicates() df.isnull().sum() df[df['name'].isnull()].head() df = df.dropna()<jupyter_output><empty_output><jupyter_text>#### Sanity check for value ranges and to check assumptions<jupyter_code>df[(df['price'] * df['quantity']) != df['line_total']].shape[0] df[df['line_total'] < 0].shape[0]<jupyter_output><empty_output><jupyter_text>#### TODO: Set line_total = price * quantity if different. Then remove if line total < 0<jupyter_code>df = df[(df['price'] * df['quantity']) == df['line_total']] df = df[df['line_total'] >= 0] df.describe()<jupyter_output><empty_output><jupyter_text>#### TODO: Get value between "" in name and put it in category column<jupyter_code>pattern = r'^"([A-Z ]+)" (.*)' transform_func = lambda x: re.findall(pattern, x)[0] df[['category', 'name']] = df['name'] \ .apply(transform_func) \ .apply(pd.Series) df.head()<jupyter_output><empty_output><jupyter_text>#### Analysis, finally!<jupyter_code>f, ax = plt.subplots(figsize=(10, 6)) df.groupby('name')['line_total'].sum().sort_values(ascending=False).head(10).plot(kind='bar') f.autofmt_xdate() plt.show()<jupyter_output><empty_output>
no_license
/Lab2/sales_solutions.ipynb
adamnguyenitvn/bigdata_labs_2019
8
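The cleaning notebook above pulls the quoted, upper-case category out of the name column with the pattern `r'^"([A-Z ]+)" (.*)'`. Here is a small illustration of what that pattern captures, using a made-up product name (the real values live in sales.csv, which is not shown here):

```python
import re

pattern = r'^"([A-Z ]+)" (.*)'

# Hypothetical value in the format the pattern expects:
# an upper-case category in double quotes, followed by the item name.
sample = '"ICE CREAM" Peanut Fudge'
category, name = re.findall(pattern, sample)[0]
print(category, "|", name)  # ICE CREAM | Peanut Fudge
```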
<jupyter_start><jupyter_text>Look that the size of the images have been reduced to half in each direction<jupyter_code>base_path = "/data/MNIST" df_train = pd.read_csv(base_path + "/mnist_train.csv", header = None) df_test = pd.read_csv(base_path + "/mnist_test.csv", header = None) X_data_train = df_train.iloc[:, 1:].values / 255 + 0.00001 X_data_test = df_test.iloc[:, 1:].values / 255 + 0.00001 y_data_train = df_train.iloc[:, 0].values y_data_test = df_test.iloc[:, 0].values X_data_train.shape, X_data_test.shape tf.reset_default_graph() with tf.Session() as sess: y = tf.placeholder(dtype=tf.int32, shape = [None], name="y") d = sess.run(tf.one_hot(y, depth=10), feed_dict={y: y_data_train}) df = pd.DataFrame(d) df["y"] = y_data_train df.head() class Batchable: import math import numpy as np def shuffle(self): X = self.X y = self.y idx = np.arange(X.shape[0]) np.random.shuffle(idx) self.X = X[idx, :] self.y = y[idx] return def __init__(self, X, y, batch_size = 128, seed = 1): np.random.seed(seed) self.X = X self.y = y self.shuffle() self.start = 0 self.batch_size = batch_size self.num_batches = math.ceil(X.shape[0] / batch_size) def next(self): start = self.start end = self.start + self.batch_size end = min(self.X.shape[0], end) self.start = end % self.X.shape[0] return self.X[start: end, :], self.y[start: end] def show(scores, ax = None): df = pd.DataFrame.from_dict(scores) if ax is None: _, ax = plt.subplots() df.plot.line(alpha = 0.4, ax = ax) df.rolling(100, min_periods=1).mean().plot.line(ax = ax) tf.reset_default_graph() iterations = 10000 mini_batch_size = 128 n_y = 10 n_x = 784 n_h = 300 X = tf.placeholder(tf.float32, [None, n_x]) y = tf.placeholder(tf.int32, [None]) y_oh = tf.one_hot(y, depth = n_y) W = tf.Variable(tf.truncated_normal(shape=[n_x, n_y], seed=1)) b = tf.zeros(shape=[n_y]) y_pred = tf.matmul(X, W) + b cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels= y_oh)) gd_step = tf.train.GradientDescentOptimizer(0.1).minimize(cost) #gd_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost) correct_mask = tf.equal(tf.argmax(y_pred, 1), tf.cast(y, tf.int64)) accuracy = tf.reduce_mean(tf.cast(correct_mask, tf.float32)) scores =[] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) batchable = Batchable(X_data_train, y_data_train, mini_batch_size) for i in range(iterations): batch_x, batch_y = batchable.next() _, cost_ = sess.run([gd_step, cost], feed_dict={X: batch_x, y: batch_y}) scores.append(cost_) if i % 500 == 0 or i == iterations - 1: accuracy_train = sess.run(accuracy, feed_dict={X: X_data_train, y: y_data_train}) accuracy_test = sess.run(accuracy, feed_dict={X: X_data_test, y: y_data_test}) d = {"iteration": i,"training": accuracy_train, "testing:": accuracy_test} print(d) show(scores) tf.reset_default_graph() iterations = 10000 mini_batch_size = 128 n_y = 10 n_x = 784 n_h = 300 X = tf.placeholder(tf.float32, [None, n_x]) y = tf.placeholder(tf.int32, [None]) W1 = tf.Variable(tf.truncated_normal(shape=[n_x, n_h], seed=1)) b1 = tf.zeros(shape=[n_h]) y_oh = tf.one_hot(y, depth = n_y) A1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.truncated_normal(shape=[n_h, n_y], seed=1)) b2 = tf.zeros(shape=[n_y]) y_pred = tf.matmul(A1, W2) + b2 cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels= y_oh)) #gd_step = tf.train.GradientDescentOptimizer(0.1).minimize(cost) gd_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost) correct_mask = tf.equal(tf.argmax(y_pred, 1), 
tf.cast(y, tf.int64)) accuracy = tf.reduce_mean(tf.cast(correct_mask, tf.float32)) scores =[] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) batchable = Batchable(X_data_train, y_data_train, mini_batch_size) for i in range(iterations): batch_x, batch_y = batchable.next() _, cost_ = sess.run([gd_step, cost], feed_dict={X: batch_x, y: batch_y}) scores.append(cost_) if i % 500 == 0 or i == iterations - 1: accuracy_train = sess.run(accuracy, feed_dict={X: X_data_train, y: y_data_train}) accuracy_test = sess.run(accuracy, feed_dict={X: X_data_test, y: y_data_test}) d = {"iteration": i,"training": accuracy_train, "testing:": accuracy_test} print(d) show(scores) tf.reset_default_graph() n_x, n_y = 784, 10 iterations = 1000 def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def conv_layer(input, shape): W = weight_variable(shape) b = bias_variable([shape[3]]) return tf.nn.relu(conv2d(input, W) + b) def full_layer(input, size): in_size = int(input.get_shape()[1]) W = weight_variable([in_size, size]) b = bias_variable([size]) return tf.matmul(input, W) + b X = tf.placeholder(tf.float32, shape=[None, n_x], name = "X") y = tf.placeholder(tf.int32, shape=[None], name = "y") y_oh = tf.one_hot(y, depth = n_y) x_image = tf.reshape(X, [-1, 28, 28, 1]) conv1 = conv_layer(x_image, shape=[5, 5, 1, 32]) conv1_pool = max_pool_2x2(conv1) conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64]) conv2_pool = max_pool_2x2(conv2) conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64]) full_1 = tf.nn.relu(full_layer(conv2_flat, 1024)) keep_prob = tf.placeholder(tf.float32, name="keep_prob") full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob) y_conv = full_layer(full1_drop, 10) cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels= y_oh)) optimization = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.cast(y, tf.int64)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) accuracy = accuracy batchable = Batchable(X_data_train, y_data_train) scores = [] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(iterations): batch_x, batch_y = batchable.next() _, train_accuracy, cost = sess.run([optimization, accuracy, cross_entropy], feed_dict={X: batch_x, y: batch_y, keep_prob: 0.8}) scores.append(cost) if i % 10 == 0 or i == iterations - 1: test_accuracy = sess.run(accuracy, feed_dict={X: X_data_test, y: y_data_test, keep_prob: 1.0}) print({"iteration": i, "train accuracy": train_accuracy, "test accuracy": test_accuracy}) show(scores) <jupyter_output>{'iteration': 0, 'train accuracy': 0.0859375, 'test accuracy': 0.0691} {'iteration': 10, 'train accuracy': 0.2109375, 'test accuracy': 0.2775} {'iteration': 20, 'train accuracy': 0.3203125, 'test accuracy': 0.56120002} {'iteration': 30, 'train accuracy': 0.3515625, 'test accuracy': 0.64590001} {'iteration': 40, 'train accuracy': 0.6171875, 'test accuracy': 0.741} {'iteration': 50, 'train accuracy': 0.609375, 'test accuracy': 0.8028} {'iteration': 60, 'train accuracy': 0.6640625, 'test accuracy': 0.84719998} {'iteration': 70, 'train accuracy': 0.6875, 'test accuracy': 0.86360002} 
{'iteration': 80, 'train accuracy': 0.7421875, 'test accuracy': 0.88169998} {'iteration': 90, 'train accuracy': 0.7421875, 'test accuracy': 0.88940001} {'iteration': 100, 'train accuracy': 0.7421875, 'test accuracy': 0.89590001} {'iteration': 110, 'train accuracy': 0.7578125, 'test accuracy': 0.90319997} {'iteration': 120, 'train accuracy': 0.84375, 'test accuracy': 0.90420002} {'iteration': 130, 'train accurac[...]
permissive
/TensorFlow - 05 CNN.ipynb
utkarshvns/machine-learning
1
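In the CNN notebook above, the second pooling layer is flattened with `tf.reshape(conv2_pool, [-1, 7*7*64])`. A quick sanity check of that shape arithmetic (editor's sketch, not part of the notebook): with 'SAME' padding the convolutions preserve the 28x28 input size, and each 2x2 max-pool with stride 2 halves each side.

```python
# 28x28 input, two conv + pool stages: 28 -> 14 -> 7 per side.
side = 28
for _ in range(2):
    side //= 2                 # each 2x2 max-pool with stride 2 halves the side
flat_dim = side * side * 64    # 64 filters in the second conv layer
print(side, flat_dim)          # 7 3136  (== 7*7*64, the reshape used above)
```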
<jupyter_start><jupyter_text># Assignment 4 - Naive Machine Translation and LSH You will now implement your first machine translation system and then you will see how locality sensitive hashing works. Let's get started by importing the required functions! If you are running this notebook in your local computer, don't forget to download the twitter samples and stopwords from nltk. ``` nltk.download('stopwords') nltk.download('twitter_samples') ```**NOTE**: The `Exercise xx` numbers in this assignment **_are inconsistent_** with the `UNQ_Cx` numbers.### This assignment covers the folowing topics: - [1. The word embeddings data for English and French words](#1) - [1.1 Generate embedding and transform matrices](#1-1) - [Exercise 1](#ex-01) - [2. Translations](#2) - [2.1 Translation as linear transformation of embeddings](#2-1) - [Exercise 2](#ex-02) - [Exercise 3](#ex-03) - [Exercise 4](#ex-04) - [2.2 Testing the translation](#2-2) - [Exercise 5](#ex-05) - [Exercise 6](#ex-06) - [3. LSH and document search](#3) - [3.1 Getting the document embeddings](#3-1) - [Exercise 7](#ex-07) - [Exercise 8](#ex-08) - [3.2 Looking up the tweets](#3-2) - [3.3 Finding the most similar tweets with LSH](#3-3) - [3.4 Getting the hash number for a vector](#3-4) - [Exercise 9](#ex-09) - [3.5 Creating a hash table](#3-5) - [Exercise 10](#ex-10) - [3.6 Creating all hash tables](#3-6) - [Exercise 11](#ex-11) <jupyter_code>import pdb import pickle import string import time import gensim import matplotlib.pyplot as plt import nltk import numpy as np import scipy import sklearn from gensim.models import KeyedVectors from nltk.corpus import stopwords, twitter_samples from nltk.tokenize import TweetTokenizer from utils import (cosine_similarity, get_dict, process_tweet) from os import getcwd # add folder, tmp2, from our local workspace containing pre-downloaded corpora files to nltk's data path filePath = f"{getcwd()}/../tmp2/" nltk.data.path.append(filePath)<jupyter_output><empty_output><jupyter_text> # 1. The word embeddings data for English and French words Write a program that translates English to French. ## The data The full dataset for English embeddings is about 3.64 gigabytes, and the French embeddings are about 629 megabytes. To prevent the Coursera workspace from crashing, we've extracted a subset of the embeddings for the words that you'll use in this assignment. If you want to run this on your local computer and use the full dataset, you can download the * English embeddings from Google code archive word2vec [look for GoogleNews-vectors-negative300.bin.gz](https://code.google.com/archive/p/word2vec/) * You'll need to unzip the file first. * and the French embeddings from [cross_lingual_text_classification](https://github.com/vjstark/crosslingual_text_classification). 
* in the terminal, type (in one line) `curl -o ./wiki.multi.fr.vec https://dl.fbaipublicfiles.com/arrival/vectors/wiki.multi.fr.vec` Then copy-paste the code below and run it.```python # Use this code to download and process the full dataset on your local computer from gensim.models import KeyedVectors en_embeddings = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary = True) fr_embeddings = KeyedVectors.load_word2vec_format('./wiki.multi.fr.vec') # loading the english to french dictionaries en_fr_train = get_dict('en-fr.train.txt') print('The length of the english to french training dictionary is', len(en_fr_train)) en_fr_test = get_dict('en-fr.test.txt') print('The length of the english to french test dictionary is', len(en_fr_train)) english_set = set(en_embeddings.vocab) french_set = set(fr_embeddings.vocab) en_embeddings_subset = {} fr_embeddings_subset = {} french_words = set(en_fr_train.values()) for en_word in en_fr_train.keys(): fr_word = en_fr_train[en_word] if fr_word in french_set and en_word in english_set: en_embeddings_subset[en_word] = en_embeddings[en_word] fr_embeddings_subset[fr_word] = fr_embeddings[fr_word] for en_word in en_fr_test.keys(): fr_word = en_fr_test[en_word] if fr_word in french_set and en_word in english_set: en_embeddings_subset[en_word] = en_embeddings[en_word] fr_embeddings_subset[fr_word] = fr_embeddings[fr_word] pickle.dump( en_embeddings_subset, open( "en_embeddings.p", "wb" ) ) pickle.dump( fr_embeddings_subset, open( "fr_embeddings.p", "wb" ) ) ```#### The subset of data To do the assignment on the Coursera workspace, we'll use the subset of word embeddings.<jupyter_code>en_embeddings_subset = pickle.load(open("en_embeddings.p", "rb")) fr_embeddings_subset = pickle.load(open("fr_embeddings.p", "rb"))<jupyter_output><empty_output><jupyter_text>#### Look at the data * en_embeddings_subset: the key is an English word, and the vaule is a 300 dimensional array, which is the embedding for that word. ``` 'the': array([ 0.08007812, 0.10498047, 0.04980469, 0.0534668 , -0.06738281, .... ``` * fr_embeddings_subset: the key is an French word, and the vaule is a 300 dimensional array, which is the embedding for that word. ``` 'la': array([-6.18250e-03, -9.43867e-04, -8.82648e-03, 3.24623e-02,... ```#### Load two dictionaries mapping the English to French words * A training dictionary * and a testing dictionary.<jupyter_code># loading the english to french dictionaries en_fr_train = get_dict('en-fr.train.txt') print('The length of the English to French training dictionary is', len(en_fr_train)) en_fr_test = get_dict('en-fr.test.txt') print('The length of the English to French test dictionary is', len(en_fr_train))<jupyter_output>The length of the English to French training dictionary is 5000 The length of the English to French test dictionary is 5000 <jupyter_text>#### Looking at the English French dictionary * `en_fr_train` is a dictionary where the key is the English word and the value is the French translation of that English word. ``` {'the': 'la', 'and': 'et', 'was': 'était', 'for': 'pour', ``` * `en_fr_test` is similar to `en_fr_train`, but is a test set. We won't look at it until we get to testing. ## 1.1 Generate embedding and transform matrices #### Exercise 01: Translating English dictionary to French by using embeddings You will now implement a function `get_matrices`, which takes the loaded data and returns matrices `X` and `Y`. 
Inputs: - `en_fr` : English to French dictionary - `en_embeddings` : English to embeddings dictionary - `fr_embeddings` : French to embeddings dictionary Returns: - Matrix `X` and matrix `Y`, where each row in X is the word embedding for an english word, and the same row in Y is the word embedding for the French version of that English word. Figure 2 Use the `en_fr` dictionary to ensure that the ith row in the `X` matrix corresponds to the ith row in the `Y` matrix.**Instructions**: Complete the function `get_matrices()`: * Iterate over English words in `en_fr` dictionary. * Check if the word have both English and French embedding. Hints Sets are useful data structures that can be used to check if an item is a member of a group. You can get words which are embedded into the language by using keys method. Keep vectors in `X` and `Y` sorted in list. You can use np.vstack() to merge them into the numpy matrix. numpy.vstack stacks the items in a list as rows in a matrix. <jupyter_code># UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def get_matrices(en_fr, french_vecs, english_vecs): """ Input: en_fr: English to French dictionary french_vecs: French words to their corresponding word embeddings. english_vecs: English words to their corresponding word embeddings. Output: X: a matrix where the columns are the English embeddings. Y: a matrix where the columns correspong to the French embeddings. R: the projection matrix that minimizes the F norm ||X R -Y||^2. """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # X_l and Y_l are lists of the english and french word embeddings X_l = list() Y_l = list() # get the english words (the keys in the dictionary) and store in a set() english_set = english_vecs.keys() # get the french words (keys in the dictionary) and store in a set() french_set = french_vecs.keys() # store the french words that are part of the english-french dictionary (these are the values of the dictionary) french_words = set(en_fr.values()) # loop through all english, french word pairs in the english french dictionary for en_word, fr_word in en_fr.items(): # check that the french word has an embedding and that the english word has an embedding if fr_word in french_set and en_word in english_set: # get the english embedding en_vec = english_vecs[en_word] # get the french embedding fr_vec = french_vecs[fr_word] # add the english embedding to the list X_l.append(en_vec) # add the french embedding to the list Y_l.append(fr_vec) # stack the vectors of X_l into a matrix X X = np.vstack(X_l) # stack the vectors of Y_l into a matrix Y Y = np.vstack(Y_l) ### END CODE HERE ### return X, Y <jupyter_output><empty_output><jupyter_text>Now we will use function `get_matrices()` to obtain sets `X_train` and `Y_train` of English and French word embeddings into the corresponding vector space models.<jupyter_code># UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # getting the training set: X_train, Y_train = get_matrices( en_fr_train, fr_embeddings_subset, en_embeddings_subset)<jupyter_output><empty_output><jupyter_text> # 2. Translations Figure 1 Write a program that translates English words to French words using word embeddings and vector space models. 
## 2.1 Translation as linear transformation of embeddings Given dictionaries of English and French word embeddings you will create a transformation matrix `R` * Given an English word embedding, $\mathbf{e}$, you can multiply $\mathbf{eR}$ to get a new word embedding $\mathbf{f}$. * Both $\mathbf{e}$ and $\mathbf{f}$ are [row vectors](https://en.wikipedia.org/wiki/Row_and_column_vectors). * You can then compute the nearest neighbors to `f` in the french embeddings and recommend the word that is most similar to the transformed word embedding.### Describing translation as the minimization problem Find a matrix `R` that minimizes the following equation. $$\arg \min _{\mathbf{R}}\| \mathbf{X R} - \mathbf{Y}\|_{F}\tag{1} $$ ### Frobenius norm The Frobenius norm of a matrix $A$ (assuming it is of dimension $m,n$) is defined as the square root of the sum of the absolute squares of its elements: $$\|\mathbf{A}\|_{F} \equiv \sqrt{\sum_{i=1}^{m} \sum_{j=1}^{n}\left|a_{i j}\right|^{2}}\tag{2}$$### Actual loss function In the real world applications, the Frobenius norm loss: $$\| \mathbf{XR} - \mathbf{Y}\|_{F}$$ is often replaced by it's squared value divided by $m$: $$ \frac{1}{m} \| \mathbf{X R} - \mathbf{Y} \|_{F}^{2}$$ where $m$ is the number of examples (rows in $\mathbf{X}$). * The same R is found when using this loss function versus the original Frobenius norm. * The reason for taking the square is that it's easier to compute the gradient of the squared Frobenius. * The reason for dividing by $m$ is that we're more interested in the average loss per embedding than the loss for the entire training set. * The loss for all training set increases with more words (training examples), so taking the average helps us to track the average loss regardless of the size of the training set.##### [Optional] Detailed explanation why we use norm squared instead of the norm: Click for optional details The norm is always nonnegative (we're summing up absolute values), and so is the square. When we take the square of all non-negative (positive or zero) numbers, the order of the data is preserved. For example, if 3 > 2, 3^2 > 2^2 Using the norm or squared norm in gradient descent results in the same location of the minimum. Squaring cancels the square root in the Frobenius norm formula. Because of the chain rule, we would have to do more calculations if we had a square root in our expression for summation. Dividing the function value by the positive number doesn't change the optimum of the function, for the same reason as described above. We're interested in transforming English embedding into the French. Thus, it is more important to measure average loss per embedding than the loss for the entire dictionary (which increases as the number of words in the dictionary increases). ### Exercise 02: Implementing translation mechanism described in this section. #### Step 1: Computing the loss * The loss function will be squared Frobenoius norm of the difference between matrix and its approximation, divided by the number of training examples $m$. * Its formula is: $$ L(X, Y, R)=\frac{1}{m}\sum_{i=1}^{m} \sum_{j=1}^{n}\left( a_{i j} \right)^{2}$$ where $a_{i j}$ is value in $i$th row and $j$th column of the matrix $\mathbf{XR}-\mathbf{Y}$.#### Instructions: complete the `compute_loss()` function * Compute the approximation of `Y` by matrix multiplying `X` and `R` * Compute difference `XR - Y` * Compute the squared Frobenius norm of the difference and divide it by $m$. 
Hints Useful functions: Numpy dot , Numpy sum, Numpy square, Numpy norm Be careful about which operation is elementwise and which operation is a matrix multiplication. Try to use matrix operations instead of the numpy norm function. If you choose to use norm function, take care of extra arguments and that it's returning loss squared, and not the loss itself. <jupyter_code># UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def compute_loss(X, Y, R): ''' Inputs: X: a matrix of dimension (m,n) where the columns are the English embeddings. Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings. R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings. Outputs: L: a matrix of dimension (m,n) - the value of the loss function for given X, Y and R. ''' ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # m is the number of rows in X m = X.shape[0] # diff is XR - Y diff = np.dot(X,R)-Y # diff_squared is the element-wise square of the difference diff_squared = diff**2 # sum_diff_squared is the sum of the squared elements sum_diff_squared = np.sum(diff_squared) # loss i the sum_diff_squard divided by the number of examples (m) loss = sum_diff_squared/m ### END CODE HERE ### return loss <jupyter_output><empty_output><jupyter_text> ### Exercise 03 ### Step 2: Computing the gradient of loss in respect to transform matrix R * Calculate the gradient of the loss with respect to transform matrix `R`. * The gradient is a matrix that encodes how much a small change in `R` affect the change in the loss function. * The gradient gives us the direction in which we should decrease `R` to minimize the loss. * $m$ is the number of training examples (number of rows in $X$). * The formula for the gradient of the loss function $𝐿(𝑋,𝑌,𝑅)$ is: $$\frac{d}{dR}𝐿(𝑋,𝑌,𝑅)=\frac{d}{dR}\Big(\frac{1}{m}\| X R -Y\|_{F}^{2}\Big) = \frac{2}{m}X^{T} (X R - Y)$$ **Instructions**: Complete the `compute_gradient` function below. Hints Transposing in numpy Finding out the dimensions of matrices in numpy Remember to use numpy.dot for matrix multiplication <jupyter_code># UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def compute_gradient(X, Y, R): ''' Inputs: X: a matrix of dimension (m,n) where the columns are the English embeddings. Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings. R: a matrix of dimension (n,n) - transformation matrix from English to French vector space embeddings. Outputs: g: a matrix of dimension (n,n) - gradient of the loss function L for given X, Y and R. ''' ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # m is the number of rows in X m = X.shape[0] # gradient is X^T(XR - Y) * 2/m gradient = np.dot(X.transpose(),np.dot(X,R)-Y)*(2/m) ### END CODE HERE ### return gradient <jupyter_output><empty_output><jupyter_text>### Step 3: Finding the optimal R with gradient descent algorithm #### Gradient descent [Gradient descent](https://ml-cheatsheet.readthedocs.io/en/latest/gradient_descent.html) is an iterative algorithm which is used in searching for the optimum of the function. * Earlier, we've mentioned that the gradient of the loss with respect to the matrix encodes how much a tiny change in some coordinate of that matrix affect the change of loss function. * Gradient descent uses that information to iteratively change matrix `R` until we reach a point where the loss is minimized. 
#### Training with a fixed number of iterations Most of the time we iterate for a fixed number of training steps rather than iterating until the loss falls below a threshold. ##### OPTIONAL: explanation for fixed number of iterations click here for detailed discussion You cannot rely on training loss getting low -- what you really want is the validation loss to go down, or validation accuracy to go up. And indeed - in some cases people train until validation accuracy reaches a threshold, or -- commonly known as "early stopping" -- until the validation accuracy starts to go down, which is a sign of over-fitting. Why not always do "early stopping"? Well, mostly because well-regularized models on larger data-sets never stop improving. Especially in NLP, you can often continue training for months and the model will continue getting slightly and slightly better. This is also the reason why it's hard to just stop at a threshold -- unless there's an external customer setting the threshold, why stop, where do you put the threshold? Stopping after a certain number of steps has the advantage that you know how long your training will take - so you can keep some sanity and not train for months. You can then try to get the best performance within this time budget. Another advantage is that you can fix your learning rate schedule -- e.g., lower the learning rate at 10% before finish, and then again more at 1% before finishing. Such learning rate schedules help a lot, but are harder to do if you don't know how long you're training. Pseudocode: 1. Calculate gradient $g$ of the loss with respect to the matrix $R$. 2. Update $R$ with the formula: $$R_{\text{new}}= R_{\text{old}}-\alpha g$$ Where $\alpha$ is the learning rate, which is a scalar.#### Learning rate * The learning rate or "step size" $\alpha$ is a coefficient which decides how much we want to change $R$ in each step. * If we change $R$ too much, we could skip the optimum by taking too large of a step. * If we make only small changes to $R$, we will need many steps to reach the optimum. * Learning rate $\alpha$ is used to control those changes. * Values of $\alpha$ are chosen depending on the problem, and we'll use `learning_rate`$=0.0003$ as the default value for our algorithm. ### Exercise 04 #### Instructions: Implement `align_embeddings()` Hints Use the 'compute_gradient()' function to get the gradient in each step <jupyter_code># UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def align_embeddings(X, Y, train_steps=100, learning_rate=0.0003): ''' Inputs: X: a matrix of dimension (m,n) where the columns are the English embeddings. Y: a matrix of dimension (m,n) where the columns correspong to the French embeddings. train_steps: positive int - describes how many steps will gradient descent algorithm do. learning_rate: positive float - describes how big steps will gradient descent algorithm do. Outputs: R: a matrix of dimension (n,n) - the projection matrix that minimizes the F norm ||X R -Y||^2 ''' np.random.seed(129) # the number of columns in X is the number of dimensions for a word vector (e.g. 
300) # R is a square matrix with length equal to the number of dimensions in th word embedding R = np.random.rand(X.shape[1], X.shape[1]) for i in range(train_steps): if i % 25 == 0: print(f"loss at iteration {i} is: {compute_loss(X, Y, R):.4f}") ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # use the function that you defined to compute the gradient gradient = compute_gradient(X,Y,R) # update R by subtracting the learning rate times gradient R -= learning_rate * gradient ### END CODE HERE ### return R # UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # Testing your implementation. np.random.seed(129) m = 10 n = 5 X = np.random.rand(m, n) Y = np.random.rand(m, n) * .1 R = align_embeddings(X, Y)<jupyter_output>loss at iteration 0 is: 3.7242 loss at iteration 25 is: 3.6283 loss at iteration 50 is: 3.5350 loss at iteration 75 is: 3.4442 <jupyter_text>**Expected Output:** ``` loss at iteration 0 is: 3.7242 loss at iteration 25 is: 3.6283 loss at iteration 50 is: 3.5350 loss at iteration 75 is: 3.4442 ```## Calculate transformation matrix R Using those the training set, find the transformation matrix $\mathbf{R}$ by calling the function `align_embeddings()`. **NOTE:** The code cell below will take a few minutes to fully execute (~3 mins)<jupyter_code># UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything R_train = align_embeddings(X_train, Y_train, train_steps=400, learning_rate=0.8)<jupyter_output>loss at iteration 0 is: 963.0146 loss at iteration 25 is: 97.8292 loss at iteration 50 is: 26.8329 loss at iteration 75 is: 9.7893 loss at iteration 100 is: 4.3776 loss at iteration 125 is: 2.3281 loss at iteration 150 is: 1.4480 loss at iteration 175 is: 1.0338 loss at iteration 200 is: 0.8251 loss at iteration 225 is: 0.7145 loss at iteration 250 is: 0.6534 loss at iteration 275 is: 0.6185 loss at iteration 300 is: 0.5981 loss at iteration 325 is: 0.5858 loss at iteration 350 is: 0.5782 loss at iteration 375 is: 0.5735 <jupyter_text>##### Expected Output ``` loss at iteration 0 is: 963.0146 loss at iteration 25 is: 97.8292 loss at iteration 50 is: 26.8329 loss at iteration 75 is: 9.7893 loss at iteration 100 is: 4.3776 loss at iteration 125 is: 2.3281 loss at iteration 150 is: 1.4480 loss at iteration 175 is: 1.0338 loss at iteration 200 is: 0.8251 loss at iteration 225 is: 0.7145 loss at iteration 250 is: 0.6534 loss at iteration 275 is: 0.6185 loss at iteration 300 is: 0.5981 loss at iteration 325 is: 0.5858 loss at iteration 350 is: 0.5782 loss at iteration 375 is: 0.5735 ``` ## 2.2 Testing the translation ### k-Nearest neighbors algorithm [k-Nearest neighbors algorithm](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) * k-NN is a method which takes a vector as input and finds the other vectors in the dataset that are closest to it. * The 'k' is the number of "nearest neighbors" to find (e.g. k=2 finds the closest two neighbors). ### Searching for the translation embedding Since we're approximating the translation function from English to French embeddings by a linear transformation matrix $\mathbf{R}$, most of the time we won't get the exact embedding of a French word when we transform embedding $\mathbf{e}$ of some particular English word into the French embedding space. * This is where $k$-NN becomes really useful! 
By using $1$-NN with $\mathbf{eR}$ as input, we can search for an embedding $\mathbf{f}$ (as a row) in the matrix $\mathbf{Y}$ which is the closest to the transformed vector $\mathbf{eR}$### Cosine similarity Cosine similarity between vectors $u$ and $v$ calculated as the cosine of the angle between them. The formula is $$\cos(u,v)=\frac{u\cdot v}{\left\|u\right\|\left\|v\right\|}$$ * $\cos(u,v)$ = $1$ when $u$ and $v$ lie on the same line and have the same direction. * $\cos(u,v)$ is $-1$ when they have exactly opposite directions. * $\cos(u,v)$ is $0$ when the vectors are orthogonal (perpendicular) to each other.#### Note: Distance and similarity are pretty much opposite things. * We can obtain distance metric from cosine similarity, but the cosine similarity can't be used directly as the distance metric. * When the cosine similarity increases (towards $1$), the "distance" between the two vectors decreases (towards $0$). * We can define the cosine distance between $u$ and $v$ as $$d_{\text{cos}}(u,v)=1-\cos(u,v)$$ **Exercise 05**: Complete the function `nearest_neighbor()` Inputs: * Vector `v`, * A set of possible nearest neighbors `candidates` * `k` nearest neighbors to find. * The distance metric should be based on cosine similarity. * `cosine_similarity` function is already implemented and imported for you. It's arguments are two vectors and it returns the cosine of the angle between them. * Iterate over rows in `candidates`, and save the result of similarities between current row and vector `v` in a python list. Take care that similarities are in the same order as row vectors of `candidates`. * Now you can use [numpy argsort]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html#numpy.argsort) to sort the indices for the rows of `candidates`. Hints numpy.argsort sorts values from most negative to most positive (smallest to largest) The candidates that are nearest to 'v' should have the highest cosine similarity To get the last element of a list 'tmp', the notation is tmp[-1:] <jupyter_code># UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def nearest_neighbor(v, candidates, k=1): """ Input: - v, the vector you are going find the nearest neighbor for - candidates: a set of vectors where we will find the neighbors - k: top k nearest neighbors to find Output: - k_idx: the indices of the top k closest vectors in sorted form """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### similarity_l = [] # for each candidate vector... 
for row in candidates: # get the cosine similarity cos_similarity = cosine_similarity(v,row) # append the similarity to the list similarity_l.append(cos_similarity) # sort the similarity list and get the indices of the sorted list sorted_ids = np.argsort(similarity_l) # get the indices of the k most similar candidate vectors k_idx = sorted_ids[-k:] ### END CODE HERE ### return k_idx # UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # Test your implementation: v = np.array([1, 0, 1]) candidates = np.array([[1, 0, 5], [-2, 5, 3], [2, 0, 1], [6, -9, 5], [9, 9, 9]]) print(candidates[nearest_neighbor(v, candidates, 3)])<jupyter_output>[[9 9 9] [1 0 5] [2 0 1]] <jupyter_text>**Expected Output**: `[[9 9 9] [1 0 5] [2 0 1]]`### Test your translation and compute its accuracy **Exercise 06**: Complete the function `test_vocabulary` which takes in English embedding matrix $X$, French embedding matrix $Y$ and the $R$ matrix and returns the accuracy of translations from $X$ to $Y$ by $R$. * Iterate over transformed English word embeddings and check if the closest French word vector belongs to French word that is the actual translation. * Obtain an index of the closest French embedding by using `nearest_neighbor` (with argument `k=1`), and compare it to the index of the English embedding you have just transformed. * Keep track of the number of times you get the correct translation. * Calculate accuracy as $$\text{accuracy}=\frac{\#(\text{correct predictions})}{\#(\text{total predictions})}$$<jupyter_code># UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def test_vocabulary(X, Y, R): ''' Input: X: a matrix where the columns are the English embeddings. Y: a matrix where the columns correspong to the French embeddings. R: the transform matrix which translates word embeddings from English to French word vector space. Output: accuracy: for the English to French capitals ''' ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # The prediction is X times R pred = np.dot(X,R) # initialize the number correct to zero num_correct = 0 # loop through each row in pred (each transformed embedding) for i in range(len(pred)): # get the index of the nearest neighbor of pred at row 'i'; also pass in the candidates in Y pred_idx = nearest_neighbor(pred[i],Y) # if the index of the nearest neighbor equals the row of i... \ if pred_idx == i: # increment the number correct by 1. num_correct += 1 # accuracy is the number correct divided by the number of rows in 'pred' (also number of rows in X) accuracy = num_correct / len(pred) ### END CODE HERE ### return accuracy <jupyter_output><empty_output><jupyter_text>Let's see how is your translation mechanism working on the unseen data:<jupyter_code>X_val, Y_val = get_matrices(en_fr_test, fr_embeddings_subset, en_embeddings_subset) # UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything acc = test_vocabulary(X_val, Y_val, R_train) # this might take a minute or two print(f"accuracy on test set is {acc:.3f}")<jupyter_output>accuracy on test set is 0.557 <jupyter_text>**Expected Output**: ``` 0.557 ``` You managed to translate words from one language to another language without ever seing them with almost 56% accuracy by using some basic linear algebra and learning a mapping of words from one language to another! # 3. 
LSH and document search In this part of the assignment, you will implement a more efficient version of k-nearest neighbors using locality sensitive hashing. You will then apply this to document search. * Process the tweets and represent each tweet as a vector (represent a document with a vector embedding). * Use locality sensitive hashing and k nearest neighbors to find tweets that are similar to a given tweet.<jupyter_code># get the positive and negative tweets all_positive_tweets = twitter_samples.strings('positive_tweets.json') all_negative_tweets = twitter_samples.strings('negative_tweets.json') all_tweets = all_positive_tweets + all_negative_tweets<jupyter_output><empty_output><jupyter_text> ### 3.1 Getting the document embeddings #### Bag-of-words (BOW) document models Text documents are sequences of words. * The ordering of words makes a difference. For example, sentences "Apple pie is better than pepperoni pizza." and "Pepperoni pizza is better than apple pie" have opposite meanings due to the word ordering. * However, for some applications, ignoring the order of words can allow us to train an efficient and still effective model. * This approach is called Bag-of-words document model. #### Document embeddings * Document embedding is created by summing up the embeddings of all words in the document. * If we don't know the embedding of some word, we can ignore that word. **Exercise 07**: Complete the `get_document_embedding()` function. * The function `get_document_embedding()` encodes entire document as a "document" embedding. * It takes in a docoument (as a string) and a dictionary, `en_embeddings` * It processes the document, and looks up the corresponding embedding of each word. * It then sums them up and returns the sum of all word vectors of that processed tweet. Hints You can handle missing words easier by using the `get()` method of the python dictionary instead of the bracket notation (i.e. "[ ]"). See more about it here The default value for missing word should be the zero vector. Numpy will broadcast simple 0 scalar into a vector of zeros during the summation. Alternatively, skip the addition if a word is not in the dictonary. You can use your `process_tweet()` function which allows you to process the tweet. The function just takes in a tweet and returns a list of words. <jupyter_code># UNQ_C12 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def get_document_embedding(tweet, en_embeddings): ''' Input: - tweet: a string - en_embeddings: a dictionary of word embeddings Output: - doc_embedding: sum of all word embeddings in the tweet ''' doc_embedding = np.zeros(300) ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # process the document into a list of words (process the tweet) processed_doc = process_tweet(tweet) for word in processed_doc: # add the word embedding to the running total for the document embedding doc_embedding += en_embeddings.get(word,0) ### END CODE HERE ### return doc_embedding # UNQ_C13 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # testing your function custom_tweet = "RT @Twitter @chapagain Hello There! Have a great day. 
:) #good #morning http://chapagain.com.np" tweet_embedding = get_document_embedding(custom_tweet, en_embeddings_subset) tweet_embedding[-5:]<jupyter_output><empty_output><jupyter_text>**Expected output**: ``` array([-0.00268555, -0.15378189, -0.55761719, -0.07216644, -0.32263184]) ``` ### Exercise 08 #### Store all document vectors into a dictionary Now, let's store all the tweet embeddings into a dictionary. Implement `get_document_vecs()`<jupyter_code># UNQ_C14 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def get_document_vecs(all_docs, en_embeddings): ''' Input: - all_docs: list of strings - all tweets in our dataset. - en_embeddings: dictionary with words as the keys and their embeddings as the values. Output: - document_vec_matrix: matrix of tweet embeddings. - ind2Doc_dict: dictionary with indices of tweets in vecs as keys and their embeddings as the values. ''' # the dictionary's key is an index (integer) that identifies a specific tweet # the value is the document embedding for that document ind2Doc_dict = {} # this is list that will store the document vectors document_vec_l = [] for i, doc in enumerate(all_docs): ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # get the document embedding of the tweet doc_embedding = get_document_embedding(doc,en_embeddings) # save the document embedding into the ind2Tweet dictionary at index i ind2Doc_dict[i] = doc_embedding # append the document embedding to the list of document vectors document_vec_l.append(doc_embedding) ### END CODE HERE ### # convert the list of document vectors into a 2D array (each row is a document vector) document_vec_matrix = np.vstack(document_vec_l) return document_vec_matrix, ind2Doc_dict document_vecs, ind2Tweet = get_document_vecs(all_tweets, en_embeddings_subset) # UNQ_C15 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything print(f"length of dictionary {len(ind2Tweet)}") print(f"shape of document_vecs {document_vecs.shape}")<jupyter_output>length of dictionary 10000 shape of document_vecs (10000, 300) <jupyter_text>##### Expected Output ``` length of dictionary 10000 shape of document_vecs (10000, 300) ``` ## 3.2 Looking up the tweets Now you have a vector of dimension (m,d) where `m` is the number of tweets (10,000) and `d` is the dimension of the embeddings (300). Now you will input a tweet, and use cosine similarity to see which tweet in our corpus is similar to your tweet.<jupyter_code>my_tweet = 'i am sad' process_tweet(my_tweet) tweet_embedding = get_document_embedding(my_tweet, en_embeddings_subset) # UNQ_C16 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # this gives you a similar tweet as your input. # this implementation is vectorized... idx = np.argmax(cosine_similarity(document_vecs, tweet_embedding)) print(all_tweets[idx])<jupyter_output>@zoeeylim sad sad sad kid :( it's ok I help you watch the match HAHAHAHAHA <jupyter_text>##### Expected Output ``` @zoeeylim sad sad sad kid :( it's ok I help you watch the match HAHAHAHAHA ``` ## 3.3 Finding the most similar tweets with LSH You will now implement locality sensitive hashing (LSH) to identify the most similar tweet. * Instead of looking at all 10,000 vectors, you can just search a subset to find its nearest neighbors. 
Let's say your data points are plotted like this: Figure 3 You can divide the vector space into regions and search within one region for nearest neighbors of a given vector. Figure 4 <jupyter_code>N_VECS = len(all_tweets) # This many vectors. N_DIMS = len(ind2Tweet[1]) # Vector dimensionality. print(f"Number of vectors is {N_VECS} and each has {N_DIMS} dimensions.")<jupyter_output>Number of vectors is 10000 and each has 300 dimensions. <jupyter_text>#### Choosing the number of planes * Each plane divides the space to $2$ parts. * So $n$ planes divide the space into $2^{n}$ hash buckets. * We want to organize 10,000 document vectors into buckets so that every bucket has about $~16$ vectors. * For that we need $\frac{10000}{16}=625$ buckets. * We're interested in $n$, number of planes, so that $2^{n}= 625$. Now, we can calculate $n=\log_{2}625 = 9.29 \approx 10$.<jupyter_code># The number of planes. We use log2(625) to have ~16 vectors/bucket. N_PLANES = 10 # Number of times to repeat the hashing to improve the search. N_UNIVERSES = 25<jupyter_output><empty_output><jupyter_text> ## 3.4 Getting the hash number for a vector For each vector, we need to get a unique number associated to that vector in order to assign it to a "hash bucket". ### Hyperlanes in vector spaces * In $3$-dimensional vector space, the hyperplane is a regular plane. In $2$ dimensional vector space, the hyperplane is a line. * Generally, the hyperplane is subspace which has dimension $1$ lower than the original vector space has. * A hyperplane is uniquely defined by its normal vector. * Normal vector $n$ of the plane $\pi$ is the vector to which all vectors in the plane $\pi$ are orthogonal (perpendicular in $3$ dimensional case). ### Using Hyperplanes to split the vector space We can use a hyperplane to split the vector space into $2$ parts. * All vectors whose dot product with a plane's normal vector is positive are on one side of the plane. * All vectors whose dot product with the plane's normal vector is negative are on the other side of the plane. ### Encoding hash buckets * For a vector, we can take its dot product with all the planes, then encode this information to assign the vector to a single hash bucket. * When the vector is pointing to the opposite side of the hyperplane than normal, encode it by 0. * Otherwise, if the vector is on the same side as the normal vector, encode it by 1. * If you calculate the dot product with each plane in the same order for every vector, you've encoded each vector's unique hash ID as a binary number, like [0, 1, 1, ... 0]. ### Exercise 09: Implementing hash buckets We've initialized hash table `hashes` for you. It is list of `N_UNIVERSES` matrices, each describes its own hash table. Each matrix has `N_DIMS` rows and `N_PLANES` columns. Every column of that matrix is a `N_DIMS`-dimensional normal vector for each of `N_PLANES` hyperplanes which are used for creating buckets of the particular hash table. *Exercise*: Your task is to complete the function `hash_value_of_vector` which places vector `v` in the correct hash bucket. * First multiply your vector `v`, with a corresponding plane. This will give you a vector of dimension $(1,\text{N_planes})$. * You will then convert every element in that vector to 0 or 1. * You create a hash vector by doing the following: if the element is negative, it becomes a 0, otherwise you change it to a 1. * You then compute the unique number for the vector by iterating over `N_PLANES` * Then you multiply $2^i$ times the corresponding bit (0 or 1). 
* You will then store that sum in the variable `hash_value`. **Intructions:** Create a hash for the vector in the function below. Use this formula: $$ hash = \sum_{i=0}^{N-1} \left( 2^{i} \times h_{i} \right) $$#### Create the sets of planes * Create multiple (25) sets of planes (the planes that divide up the region). * You can think of these as 25 separate ways of dividing up the vector space with a different set of planes. * Each element of this list contains a matrix with 300 rows (the word vector have 300 dimensions), and 10 columns (there are 10 planes in each "universe").<jupyter_code>np.random.seed(0) planes_l = [np.random.normal(size=(N_DIMS, N_PLANES)) for _ in range(N_UNIVERSES)]<jupyter_output><empty_output><jupyter_text> Hints numpy.squeeze() removes unused dimensions from an array; for instance, it converts a (10,1) 2D array into a (10,) 1D array <jupyter_code># UNQ_C17 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) def hash_value_of_vector(v, planes): """Create a hash for a vector; hash_id says which random hash to use. Input: - v: vector of tweet. It's dimension is (1, N_DIMS) - planes: matrix of dimension (N_DIMS, N_PLANES) - the set of planes that divide up the region Output: - res: a number which is used as a hash for your vector """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # for the set of planes, # calculate the dot product between the vector and the matrix containing the planes # remember that planes has shape (300, 10) # The dot product will have the shape (1,10) dot_product = np.dot(v,planes) # get the sign of the dot product (1,10) shaped vector sign_of_dot_product = np.sign(dot_product) # set h to be false (eqivalent to 0 when used in operations) if the sign is negative, # and true (equivalent to 1) if the sign is positive (1,10) shaped vector h = sign_of_dot_product>=0 # remove extra un-used dimensions (convert this from a 2D to a 1D array) h = np.squeeze(h) # initialize the hash value to 0 hash_value = 0 n_planes = planes.shape[1] for i in range(n_planes): # increment the hash value by 2^i * h_i hash_value += np.power(2,i)*h[i] ### END CODE HERE ### # cast hash_value as an integer hash_value = int(hash_value) return hash_value # UNQ_C18 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything np.random.seed(0) idx = 0 planes = planes_l[idx] # get one 'universe' of planes to test the function vec = np.random.rand(1, 300) print(f" The hash value for this vector,", f"and the set of planes at index {idx},", f"is {hash_value_of_vector(vec, planes)}")<jupyter_output> The hash value for this vector, and the set of planes at index 0, is 768 <jupyter_text>##### Expected Output ``` The hash value for this vector, and the set of planes at index 0, is 768 ``` ## 3.5 Creating a hash table ### Exercise 10 Given that you have a unique number for each vector (or tweet), You now want to create a hash table. You need a hash table, so that given a hash_id, you can quickly look up the corresponding vectors. This allows you to reduce your search by a significant amount of time. We have given you the `make_hash_table` function, which maps the tweet vectors to a bucket and stores the vector there. It returns the `hash_table` and the `id_table`. The `id_table` allows you know which vector in a certain bucket corresponds to what tweet. 
Hints a dictionary comprehension, similar to a list comprehension, looks like this: `{i:0 for i in range(10)}`, where the key is 'i' and the value is zero for all key-value pairs. <jupyter_code># UNQ_C19 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # This is the code used to create a hash table: feel free to read over it def make_hash_table(vecs, planes): """ Input: - vecs: list of vectors to be hashed. - planes: the matrix of planes in a single "universe", with shape (embedding dimensions, number of planes). Output: - hash_table: dictionary - keys are hashes, values are lists of vectors (hash buckets) - id_table: dictionary - keys are hashes, values are list of vectors id's (it's used to know which tweet corresponds to the hashed vector) """ ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # number of planes is the number of columns in the planes matrix num_of_planes = planes.shape[1] # number of buckets is 2^(number of planes) num_buckets = 2**num_of_planes # create the hash table as a dictionary. # Keys are integers (0,1,2.. number of buckets) # Values are empty lists hash_table = {i:[] for i in range(num_buckets)} # create the id table as a dictionary. # Keys are integers (0,1,2... number of buckets) # Values are empty lists id_table = {i:[] for i in range(num_buckets)} # for each vector in 'vecs' for i, v in enumerate(vecs): # calculate the hash value for the vector h = hash_value_of_vector(v,planes) # store the vector into hash_table at key h, # by appending the vector v to the list at key h hash_table[h].append(v) # store the vector's index 'i' (each document is given a unique integer 0,1,2...) # the key is the h, and the 'i' is appended to the list at key h id_table[h].append(i) ### END CODE HERE ### return hash_table, id_table # UNQ_C20 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything np.random.seed(0) planes = planes_l[0] # get one 'universe' of planes to test the function vec = np.random.rand(1, 300) tmp_hash_table, tmp_id_table = make_hash_table(document_vecs, planes) print(f"The hash table at key 0 has {len(tmp_hash_table[0])} document vectors") print(f"The id table at key 0 has {len(tmp_id_table[0])}") print(f"The first 5 document indices stored at key 0 of are {tmp_id_table[0][0:5]}")<jupyter_output>The hash table at key 0 has 3 document vectors The id table at key 0 has 3 The first 5 document indices stored at key 0 of are [3276, 3281, 3282] <jupyter_text>##### Expected output ``` The hash table at key 0 has 3 document vectors The id table at key 0 has 3 The first 5 document indices stored at key 0 of are [3276, 3281, 3282] ``` ### 3.6 Creating all hash tables You can now hash your vectors and store them in a hash table that would allow you to quickly look up and search for similar vectors. Run the cell below to create the hashes. By doing so, you end up having several tables which have all the vectors. Given a vector, you then identify the buckets in all the tables. You can then iterate over the buckets and consider much fewer vectors. 
The more buckets you use, the more accurate your lookup will be, but also the longer it will take.<jupyter_code># Creating the hashtables hash_tables = [] id_tables = [] for universe_id in range(N_UNIVERSES): # there are 25 hashes print('working on hash universe #:', universe_id) planes = planes_l[universe_id] hash_table, id_table = make_hash_table(document_vecs, planes) hash_tables.append(hash_table) id_tables.append(id_table)<jupyter_output>working on hash universe #: 0 working on hash universe #: 1 working on hash universe #: 2 working on hash universe #: 3 working on hash universe #: 4 working on hash universe #: 5 working on hash universe #: 6 working on hash universe #: 7 working on hash universe #: 8 working on hash universe #: 9 working on hash universe #: 10 working on hash universe #: 11 working on hash universe #: 12 working on hash universe #: 13 working on hash universe #: 14 working on hash universe #: 15 working on hash universe #: 16 working on hash universe #: 17 working on hash universe #: 18 working on hash universe #: 19 working on hash universe #: 20 working on hash universe #: 21 working on hash universe #: 22 working on hash universe #: 23 working on hash universe #: 24 <jupyter_text>### Approximate K-NN ### Exercise 11 Implement approximate K nearest neighbors using locality sensitive hashing, to search for documents that are similar to a given document at the index `doc_id`. ##### Inputs * `doc_id` is the index into the document list `all_tweets`. * `v` is the document vector for the tweet in `all_tweets` at index `doc_id`. * `planes_l` is the list of planes (the global variable created earlier). * `k` is the number of nearest neighbors to search for. * `num_universes_to_use`: to save time, we can use fewer than the total number of available universes. By default, it's set to `N_UNIVERSES`, which is $25$ for this assignment. The `approximate_knn` function finds a subset of candidate vectors that are in the same "hash bucket" as the input vector 'v'. Then it performs the usual k-nearest neighbors search on this subset (instead of searching through all 10,000 tweets). Hints There are many dictionaries used in this function. Try to print out planes_l, hash_tables, id_tables to understand how they are structured, what the keys represent, and what the values contain. To remove an item from a list, use `.remove()` To append to a list, use `.append()` To add to a set, use `.add()` <jupyter_code># UNQ_C21 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # This is the code used to do the fast nearest neighbor search. 
Feel free to go over it def approximate_knn(doc_id, v, planes_l, k=1, num_universes_to_use=N_UNIVERSES): """Search for k-NN using hashes.""" assert num_universes_to_use <= N_UNIVERSES # Vectors that will be checked as possible nearest neighbor vecs_to_consider_l = list() # list of document IDs ids_to_consider_l = list() # create a set for ids to consider, for faster checking if a document ID already exists in the set ids_to_consider_set = set() # loop through the universes of planes for universe_id in range(num_universes_to_use): # get the set of planes from the planes_l list, for this particular universe_id planes = planes_l[universe_id] # get the hash value of the vector for this set of planes hash_value = hash_value_of_vector(v, planes) # get the hash table for this particular universe_id hash_table = hash_tables[universe_id] # get the list of document vectors for this hash table, where the key is the hash_value document_vectors_l = hash_table[hash_value] # get the id_table for this particular universe_id id_table = id_tables[universe_id] # get the subset of documents to consider as nearest neighbors from this id_table dictionary new_ids_to_consider = id_table[hash_value] ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ### # remove the id of the document that we're searching if doc_id in new_ids_to_consider: new_ids_to_consider.remove(doc_id) print(f"removed doc_id {doc_id} of input vector from new_ids_to_search") # loop through the subset of document vectors to consider for i, new_id in enumerate(new_ids_to_consider): # if the document ID is not yet in the set ids_to_consider... if new_id not in ids_to_consider_set: # access document_vectors_l list at index i to get the embedding # then append it to the list of vectors to consider as possible nearest neighbors document_vector_at_i = document_vectors_l[i] vecs_to_consider_l.append(document_vector_at_i) # append the new_id (the index for the document) to the list of ids to consider ids_to_consider_l.append(new_id) # also add the new_id to the set of ids to consider # (use this to check if new_id is not already in the IDs to consider) ids_to_consider_set.add(new_id) ### END CODE HERE ### # Now run k-NN on the smaller set of vecs-to-consider. 
print("Fast considering %d vecs" % len(vecs_to_consider_l)) # convert the vecs to consider set to a list, then to a numpy array vecs_to_consider_arr = np.array(vecs_to_consider_l) # call nearest neighbors on the reduced list of candidate vectors nearest_neighbor_idx_l = nearest_neighbor(v, vecs_to_consider_arr, k=k) # Use the nearest neighbor index list as indices into the ids to consider # create a list of nearest neighbors by the document ids nearest_neighbor_ids = [ids_to_consider_l[idx] for idx in nearest_neighbor_idx_l] return nearest_neighbor_ids #document_vecs, ind2Tweet doc_id = 0 doc_to_search = all_tweets[doc_id] vec_to_search = document_vecs[doc_id] # UNQ_C22 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything # Sample nearest_neighbor_ids = approximate_knn( doc_id, vec_to_search, planes_l, k=3, num_universes_to_use=5) print(f"Nearest neighbors for document {doc_id}") print(f"Document contents: {doc_to_search}") print("") for neighbor_id in nearest_neighbor_ids: print(f"Nearest neighbor at document id {neighbor_id}") print(f"document contents: {all_tweets[neighbor_id]}")<jupyter_output>Nearest neighbors for document 0 Document contents: #FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :) Nearest neighbor at document id 2140 document contents: @PopsRamjet come one, every now and then is not so bad :) Nearest neighbor at document id 701 document contents: With the top cutie of Bohol :) https://t.co/Jh7F6U46UB Nearest neighbor at document id 51 document contents: #FollowFriday @France_Espana @reglisse_menthe @CCI_inter for being top engaged members in my community this week :)
Source: NLGRF/Natural-Language-Processing-Specialization, file "/Natural Language Processing with Classification and Vector Spaces/Week 4 - Machine Translation and Document Search/C1_W4_Assignment_Solution.ipynb" (no license specified).
<jupyter_start><jupyter_text># Load input images<jupyter_code>import glob # Read in car and non-car images images = glob.glob('*/*/*.png') cars = [] notcars = [] for image in images: if 'non-vehicles' in image: notcars.append(image) else: cars.append(image) print('number of cars is {} \nand number of notcars is {} '.format(len(cars), len(notcars)))<jupyter_output>number of cars is 8792 and number of notcars is 8968 <jupyter_text># Color BasedFirst, we use color based features to classify cars and notcars images:<jupyter_code>import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time # from sklearn.svm import LinearSVC from sklearn import svm, grid_search from sklearn.preprocessing import StandardScaler # NOTE: the next import is only valid # for scikit-learn version <= 0.17 # if you are using scikit-learn >= 0.18 then use this: # from sklearn.model_selection import train_test_split from sklearn.cross_validation import train_test_split # Define a function to compute binned color features def bin_spatial(img, size=(32, 32)): # Use cv2.resize().ravel() to create the feature vector features = cv2.resize(img, size).ravel() # Return the feature vector return features # Define a function to compute color histogram features def color_hist(img, nbins=32, bins_range=(0, 256)): # Compute the histogram of the color channels separately channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range) channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range) channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range) # Concatenate the histograms into a single feature vector hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0])) # Return the individual histograms, bin_centers and feature vector return hist_features # Define a function to extract features from a list of images # Have this function call bin_spatial() and color_hist() def extract_features(imgs, cspace='RGB', spatial_size=(32, 32), hist_bins=32, hist_range=(0, 256)): # Create a list to append feature vectors to features = [] # Iterate through the list of images for file in imgs: # Read in each one by one image = mpimg.imread(file) # apply color conversion if other than 'RGB' if cspace != 'RGB': if cspace == 'HSV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) elif cspace == 'LUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV) elif cspace == 'HLS': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) elif cspace == 'YUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV) else: feature_image = np.copy(image) # Apply bin_spatial() to get spatial color features spatial_features = bin_spatial(feature_image, size=spatial_size) # Apply color_hist() also with a color space option now hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range) # Append the new feature vector to the features list features.append(np.concatenate((spatial_features, hist_features))) # Return list of feature vectors return features spatial = 32 histbin = 32 car_features = extract_features(cars, cspace='RGB', spatial_size=(spatial, spatial), hist_bins=histbin, hist_range=(0, 256)) notcar_features = extract_features(notcars, cspace='RGB', spatial_size=(spatial, spatial), hist_bins=histbin, hist_range=(0, 256)) # Create an array stack of feature vectors X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X 
= X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using spatial binning of:',spatial, 'and', histbin,'histogram bins') print('Feature vector length:', len(X_train[0])) # Use a linear SVC # svc = LinearSVC() parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = grid_search.GridSearchCV(svr, parameters) # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() n_predict = 10 print('My SVC predicts: ', svc.predict(X_test[0:n_predict])) print('For these',n_predict, 'labels: ', y_test[0:n_predict]) t2 = time.time() print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC') svc.best_params_<jupyter_output><empty_output><jupyter_text>We used gridsearch from sklearn to find best parameters for classifier. As you see, the accuracy is more than 98%. You can also see best parameters as above.# HOG BasedNow we use HOG and SVM to classify images:<jupyter_code>import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from skimage.feature import hog # NOTE: the next import is only valid for scikit-learn version <= 0.17 # for scikit-learn >= 0.18 use: # from sklearn.model_selection import train_test_split from sklearn.cross_validation import train_test_split # Define a function to return HOG features and visualization def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True): # Call with two outputs if vis==True if vis == True: features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell), cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, visualise=vis, feature_vector=feature_vec) return features, hog_image # Otherwise call with one output else: features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell), cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, visualise=vis, feature_vector=feature_vec) return features # Define a function to extract features from a list of images # Have this function call bin_spatial() and color_hist() def extract_features(imgs, cspace='RGB', orient=9, pix_per_cell=8, cell_per_block=2, hog_channel=0): # Create a list to append feature vectors to features = [] # Iterate through the list of images for file in imgs: # Read in each one by one image = mpimg.imread(file) # apply color conversion if other than 'RGB' if cspace != 'RGB': if cspace == 'HSV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) elif cspace == 'LUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV) elif cspace == 'HLS': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) elif cspace == 'YUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV) elif cspace == 'YCrCb': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb) else: feature_image = np.copy(image) # Call get_hog_features() with vis=False, feature_vec=True if hog_channel == 'ALL': hog_features = [] 
for channel in range(feature_image.shape[2]): hog_features.append(get_hog_features(feature_image[:,:,channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True)) hog_features = np.ravel(hog_features) else: hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True) # Append the new feature vector to the features list features.append(hog_features) # Return list of feature vectors return features colorspace = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb orient = 9 pix_per_cell = 8 cell_per_block = 2 hog_channel = 0 # Can be 0, 1, 2, or "ALL" t=time.time() car_features = extract_features(cars, cspace=colorspace, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel) notcar_features = extract_features(notcars, cspace=colorspace, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel) t2 = time.time() print(round(t2-t, 2), 'Seconds to extract HOG features...') # Create an array stack of feature vectors X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block') print('Feature vector length:', len(X_train[0])) # Use a linear SVC # svc = LinearSVC() parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = grid_search.GridSearchCV(svr, parameters) # Check the training time for the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() n_predict = 10 print('My SVC predicts: ', svc.predict(X_test[0:n_predict])) print('For these',n_predict, 'labels: ', y_test[0:n_predict]) t2 = time.time() print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC') svc.best_params_<jupyter_output><empty_output><jupyter_text>As you can see, accuracy of HOG based classification is more than color based.# Combining FeaturesThen we combine both two former methods:<jupyter_code>import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import cv2 import glob import time from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler # NOTE: the next import is only valid # for scikit-learn version <= 0.17 # if you are using scikit-learn >= 0.18 then use this: # from sklearn.model_selection import train_test_split from sklearn.cross_validation import train_test_split # Define a function to compute binned color features def bin_spatial(img, size=(32, 32)): # Use cv2.resize().ravel() to create the feature vector features = cv2.resize(img, size).ravel() # Return the feature vector return features # Define a function to compute color histogram features def color_hist(img, nbins=32, bins_range=(0, 256)): # Compute the histogram of the color channels separately channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range) channel2_hist = 
np.histogram(img[:,:,1], bins=nbins, range=bins_range) channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range) # Concatenate the histograms into a single feature vector hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0])) # Return the individual histograms, bin_centers and feature vector return hist_features # Define a function to extract features from a list of images # Have this function call bin_spatial() and color_hist() def extract_features(imgs, cspace='RGB', spatial_size=(32, 32), hist_bins=32, hist_range=(0, 256), orient=9, pix_per_cell=8, cell_per_block=2, hog_channel='ALL'): # Create a list to append feature vectors to features = [] # Iterate through the list of images for file in imgs: # Read in each one by one image = mpimg.imread(file) # apply color conversion if other than 'RGB' if cspace != 'RGB': if cspace == 'HSV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) elif cspace == 'LUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV) elif cspace == 'HLS': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) elif cspace == 'YUV': feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV) else: feature_image = convert_color(image, conv='RGB2YCrCb') # Apply bin_spatial() to get spatial color features spatial_features = bin_spatial(feature_image, size=spatial_size) # Apply color_hist() also with a color space option now hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range) # Call get_hog_features() with vis=False, feature_vec=True if hog_channel == 'ALL': hog_features = [] for channel in range(feature_image.shape[2]): hog_features.append(get_hog_features(feature_image[:,:,channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True)) hog_features = np.ravel(hog_features) else: hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True) # Append the new feature vector to the features list features.append(np.concatenate((spatial_features, hist_features, hog_features))) # Return list of feature vectors return features def convert_color(img, conv='RGB2YCrCb'): if conv == 'RGB2YCrCb': return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb) if conv == 'BGR2YCrCb': return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb) if conv == 'RGB2LUV': return cv2.cvtColor(img, cv2.COLOR_RGB2LUV) # performs under different binning scenarios spatial = 32 histbin = 32 car_features = extract_features(cars, cspace='RGB', spatial_size=(spatial, spatial), hist_bins=histbin, hist_range=(0, 256)) notcar_features = extract_features(notcars, cspace='RGB', spatial_size=(spatial, spatial), hist_bins=histbin, hist_range=(0, 256)) # Create an array stack of feature vectors X = np.vstack((car_features, notcar_features)).astype(np.float64) # Fit a per-column scaler X_scaler = StandardScaler().fit(X) # Apply the scaler to X scaled_X = X_scaler.transform(X) # Define the labels vector y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Split up data into randomized training and test sets rand_state = np.random.randint(0, 100) X_train, X_test, y_train, y_test = train_test_split( scaled_X, y, test_size=0.2, random_state=rand_state) print('Using spatial binning of:',spatial, 'and', histbin,'histogram bins') print('Feature vector length:', len(X_train[0])) # Use a linear SVC # svc = LinearSVC() parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svr = svm.SVC() svc = grid_search.GridSearchCV(svr, parameters) # Check the training time for 
the SVC t=time.time() svc.fit(X_train, y_train) t2 = time.time() print(round(t2-t, 2), 'Seconds to train SVC...') # Check the score of the SVC print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4)) # Check the prediction time for a single sample t=time.time() n_predict = 10 print('My SVC predicts: ', svc.predict(X_test[0:n_predict])) print('For these',n_predict, 'labels: ', y_test[0:n_predict]) t2 = time.time() print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC')<jupyter_output>C:\Users\Isaac\Documents\miniconda3\envs\carnd-term1\lib\site-packages\skimage\feature\_hog.py:119: skimage_deprecation: Default value of `block_norm`==`L1` is deprecated and will be changed to `L2-Hys` in v0.15 'be changed to `L2-Hys` in v0.15', skimage_deprecation) <jupyter_text>We got 99.72% accuracy on test data set. That's great :)# Processing PipelineApply trained classifier on a frame of video and see its performance:<jupyter_code>import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np import pickle import cv2 %matplotlib inline def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6): # Make a copy of the image imcopy = np.copy(img) # Iterate through the bounding boxes for bbox in bboxes: # Draw a rectangle given bbox coordinates cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick) # Return the image copy with boxes drawn return imcopy img = mpimg.imread('test_images/test4.jpg') spatial_size=(32, 32) hist_bins = 32 # Define a single function that can extract features using hog sub-sampling and make predictions def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins): draw_img = np.copy(img) img = img.astype(np.float32)/255 img_tosearch = img[ystart:ystop,:,:] ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb') if scale != 1: imshape = ctrans_tosearch.shape ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale))) ch1 = ctrans_tosearch[:,:,0] ch2 = ctrans_tosearch[:,:,1] ch3 = ctrans_tosearch[:,:,2] # Define blocks and steps as above nxblocks = (ch1.shape[1] // pix_per_cell)-1 nyblocks = (ch1.shape[0] // pix_per_cell)-1 nfeat_per_block = orient*cell_per_block**2 # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell window = 64 nblocks_per_window = (window // pix_per_cell)-1 cells_per_step = 2 # Instead of overlap, define how many cells to step nxsteps = (nxblocks - nblocks_per_window) // cells_per_step nysteps = (nyblocks - nblocks_per_window) // cells_per_step # Compute individual channel HOG features for the entire image hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False) hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False) hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False) b_boxes = [] for xb in range(nxsteps): for yb in range(nysteps): ypos = yb*cells_per_step xpos = xb*cells_per_step # Extract HOG for this patch hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3)) xleft = xpos*pix_per_cell ytop = ypos*pix_per_cell # Extract the image patch subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64)) # Get color features 
spatial_features = bin_spatial(subimg, size=spatial_size) hist_features = color_hist(subimg, nbins=hist_bins) # Scale features and make a prediction test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) test_prediction = svc.predict(test_features) if test_prediction == 1: xbox_left = np.int(xleft*scale) ytop_draw = np.int(ytop*scale) win_draw = np.int(window*scale) b_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart))) return b_boxes ystart = 400 ystop = 656 scale = 1.5 b_boxes = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) out_img = draw_boxes(img, b_boxes) plt.imshow(out_img)<jupyter_output>C:\Users\Isaac\Documents\miniconda3\envs\carnd-term1\lib\site-packages\skimage\feature\_hog.py:119: skimage_deprecation: Default value of `block_norm`==`L1` is deprecated and will be changed to `L2-Hys` in v0.15 'be changed to `L2-Hys` in v0.15', skimage_deprecation) <jupyter_text>The algorithm works great on the test frame. We define some other helper functions to create heatmap and reject false car detections. <jupyter_code>from scipy.ndimage.measurements import label def add_heat(heatmap, bbox_list): # Iterate through list of bboxes for box in bbox_list: # Add += 1 for all pixels inside each bbox # Assuming each "box" takes the form ((x1, y1), (x2, y2)) heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1 # Return updated heatmap return heatmap def apply_threshold(heatmap, threshold): # Zero out pixels below the threshold heatmap[heatmap <= threshold] = 0 # Return thresholded map return heatmap def draw_labeled_bboxes(img, labels): # Iterate through all detected cars for car_number in range(1, labels[1]+1): # Find pixels with each car_number label value nonzero = (labels[0] == car_number).nonzero() # Identify x and y values of those pixels nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Define a bounding box based on min/max x and y bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy))) # Draw the box on the image cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6) # Return the image return img test_images = glob.glob('test_images/*') for image in test_images: img = mpimg.imread(image) b_boxes = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) heat = np.zeros_like(img[:,:,0]).astype(np.float) add_heat(heat, b_boxes) heat = apply_threshold(heat,.9) heatmap = np.clip(heat, 0, 255) labels = label(heatmap) draw_img = draw_labeled_bboxes(np.copy(img), labels) plt.figure(figsize=(15,10)) plt.subplot(121) plt.imshow(heat, cmap='hot') plt.subplot(122) plt.imshow(draw_img) <jupyter_output>C:\Users\Isaac\Documents\miniconda3\envs\carnd-term1\lib\site-packages\skimage\feature\_hog.py:119: skimage_deprecation: Default value of `block_norm`==`L1` is deprecated and will be changed to `L2-Hys` in v0.15 'be changed to `L2-Hys` in v0.15', skimage_deprecation) <jupyter_text>Performance of algorithm on test frames is good. 
Next we apply the pipeline on video.# Video Processing Pipeline<jupyter_code>from collections import deque b_boxes_deque = deque(maxlen=30) def add_heat_video(heatmap, b_boxes_deque): # Iterate through list of bboxes for bbox_list in b_boxes_deque: for box in bbox_list: # Add += 1 for all pixels inside each bbox # Assuming each "box" takes the form ((x1, y1), (x2, y2)) heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1 # Return updated heatmap return heatmap def pipeline(img): b_boxes = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins) b_boxes_deque.append(b_boxes) heat = np.zeros_like(img[:,:,0]).astype(np.float) add_heat_video(heat, b_boxes_deque) heat = apply_threshold(heat,5) heatmap = np.clip(heat, 0, 255) labels = label(heatmap) draw_img = draw_labeled_bboxes(np.copy(img), labels) return draw_img from moviepy.editor import VideoFileClip output = 'project_video_output.mp4' clip1 = VideoFileClip("project_video.mp4") output_clip = clip1.fl_image(pipeline) %time output_clip.write_videofile(output, audio=False, fps=10)<jupyter_output>[MoviePy] >>>> Building video project_video_output.mp4 [MoviePy] Writing video project_video_output.mp4
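A note on dependencies: the imports `from sklearn.cross_validation import train_test_split` and `from sklearn import svm, grid_search` used throughout this notebook only work on scikit-learn 0.17 and earlier (both modules were removed in 0.20), and newer releases of scikit-image rename `hog`'s `visualise` argument to `visualize`. On a current scikit-learn install, the equivalent setup would look roughly like the sketch below, with the same parameter grid as above:

```python
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC

# Same grid as above, expressed with the modern GridSearchCV API
parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
svc = GridSearchCV(SVC(), parameters)
# svc.fit(X_train, y_train), then svc.score(X_test, y_test) and svc.best_params_ work as before
```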
Source: kargarisaac/SDCND-Term1-Vehicle-Detection, file "/Vehicle-Detection-rbf-kernel.ipynb" (no license specified).
<jupyter_start><jupyter_text>#### 1. 定义神经网络的相关参数和变量。<jupyter_code>batch_size = 8 x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input") y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input') w1= tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1)) y = tf.matmul(x, w1)<jupyter_output><empty_output><jupyter_text>#### 2. 设置自定义的损失函数。<jupyter_code># 定义损失函数使得预测少了的损失大,于是模型应该偏向多的方向预测。 loss_less = 10 loss_more = 1 loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less)) train_step = tf.train.AdamOptimizer(0.001).minimize(loss)<jupyter_output>WARNING:tensorflow:From <ipython-input-3-aa2614c7eb8a>:4: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where <jupyter_text>#### 3. 生成模拟数据集。<jupyter_code>rdm = RandomState(1) X = rdm.rand(128,2) Y = [[x1+x2+(rdm.rand()/10.0-0.05)] for (x1, x2) in X]<jupyter_output><empty_output><jupyter_text>#### 4. 训练模型。<jupyter_code>with tf.Session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) STEPS = 5000 for i in range(STEPS): start = (i*batch_size) % 128 end = (i*batch_size) % 128 + batch_size sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]}) if i % 1000 == 0: print("After %d training step(s), w1 is: " % (i)) print(sess.run(w1), "\n") print("Final w1 is: \n", sess.run(w1))<jupyter_output>After 0 training step(s), w1 is: [[-0.81031823] [ 1.4855988 ]] After 1000 training step(s), w1 is: [[0.01247114] [2.138545 ]] After 2000 training step(s), w1 is: [[0.45567423] [2.1706069 ]] After 3000 training step(s), w1 is: [[0.69968736] [1.846531 ]] After 4000 training step(s), w1 is: [[0.8988668] [1.2973604]] Final w1 is: [[1.0193471] [1.0428091]] <jupyter_text>#### 5. 重新定义损失函数,使得预测多了的损失大,于是模型应该偏向少的方向预测。<jupyter_code>loss_less = 1 loss_more = 10 loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less)) train_step = tf.train.AdamOptimizer(0.001).minimize(loss) with tf.Session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) STEPS = 5000 for i in range(STEPS): start = (i*batch_size) % 128 end = (i*batch_size) % 128 + batch_size sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]}) if i % 1000 == 0: print("After %d training step(s), w1 is: " % (i)) print(sess.run(w1), "\n") print("Final w1 is: \n", sess.run(w1))<jupyter_output>After 0 training step(s), w1 is: [[-0.8123182] [ 1.4835987]] After 1000 training step(s), w1 is: [[0.18643522] [1.0739335 ]] After 2000 training step(s), w1 is: [[0.95444274] [0.9808863 ]] After 3000 training step(s), w1 is: [[0.9557403] [0.9806634]] After 4000 training step(s), w1 is: [[0.95466024] [0.9813524 ]] Final w1 is: [[0.9556111] [0.9810191]] <jupyter_text>#### 6. 
定义损失函数为MSE。<jupyter_code>loss = tf.losses.mean_squared_error(y, y_) train_step = tf.train.AdamOptimizer(0.001).minimize(loss) with tf.Session() as sess: init_op = tf.global_variables_initializer() sess.run(init_op) STEPS = 5000 for i in range(STEPS): start = (i*batch_size) % 128 end = (i*batch_size) % 128 + batch_size sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]}) if i % 1000 == 0: print("After %d training step(s), w1 is: " % (i)) print(sess.run(w1), "\n") print("Final w1 is: \n", sess.run(w1))<jupyter_output>After 0 training step(s), w1 is: [[-0.81031823] [ 1.4855988 ]] After 1000 training step(s), w1 is: [[-0.1333761] [ 1.8130922]] After 2000 training step(s), w1 is: [[0.32190308] [1.5246348 ]] After 3000 training step(s), w1 is: [[0.6785022] [1.2529727]] After 4000 training step(s), w1 is: [[0.8947401] [1.0859822]] Final w1 is: [[0.9743756] [1.0243336]]
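To see why the first custom loss drives the learned weights above the true coefficients of [1, 1] while the second drives them below, it helps to evaluate the asymmetric penalty on a toy prediction. The sketch below reproduces the same `tf.where` logic in plain NumPy purely as an illustration; it is not part of the original notebook:

```python
import numpy as np

def asymmetric_loss(y_pred, y_true, loss_less=10, loss_more=1):
    # Under-prediction (y_pred < y_true) costs loss_less per unit of error,
    # over-prediction costs loss_more per unit, mirroring the tf.where expression above.
    diff = y_pred - y_true
    return np.sum(np.where(diff > 0, diff * loss_more, -diff * loss_less))

y_true = np.ones(3)
print(asymmetric_loss(y_true + 0.1, y_true))  # over-predict by 0.1 each -> about 0.3
print(asymmetric_loss(y_true - 0.1, y_true))  # under-predict by 0.1 each -> about 3.0
```

With `loss_less = 10`, under-predicting is ten times as expensive, so gradient descent settles on weights slightly above [1, 1]; swapping the two constants reverses the effect, and plain MSE lands close to [1, 1], which matches the three training runs shown above.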
Source: Smartuil/TensorFlow-GoogleDLFramework, file "/4/.ipynb_checkpoints/1. 自定义损失函数-checkpoint.ipynb" ("custom loss function" checkpoint; no license specified).
<jupyter_start><jupyter_text>## Moving from words to phrases when doing NLP - [Abe Handler](https://www.abehandler.com/) University of Colorado, Boulder - [Shufan Wang](https://people.cs.umass.edu/~shufanwang/) University of Massachusetts, Amherst ## Introduction If you have found this tutorial (companion slides [here](https://docs.google.com/presentation/d/1C5O0EdgM33SO1KlCk90rW1g7nuf2264XcuYPZB-b7bI/edit?usp=sharing)), you have probably done NLP projects where you (a) start with documents, (b) break them into individual words, and then (c) use computation to draw conclusions about the words in the documents. For instance, in your last project, maybe you took a collection of documents, broke the documents into individual words and then ran a topic model to find groups of words that tended to appear together in the documents. Breaking documents into individual words implicitly represents text in terms of single-word units called **unigrams**. The unordered collection of all the unigrams in a document is often called a **bag of words** (see [Jurafsky and Martin](https://web.stanford.edu/~jurafsky/slp3/4.pdf)). Representing text using the unigram bag of words has many advantages. For one, analyzing unigrams is easy and fast; you can take a big text document and break it into a bunch of single-word observations, so you can observe useful statistical properties from the text. However, breaking documents into single words for downstream analysis does have downsides. One limitation is that some concepts or linguistic units within documents consist of multiple words, and so get lost or discarded when you using a unigram representation. For instance, the string "immigration hearing" refers to a particular legal proceeding. If you break this string into single word units "immigration" and "hearing", and put each of these unigrams to your bag of words, your representation of the text does not really represent the concept "immigration hearing". That means when you draw conclusions about lexical units during downsteam analysis, you won't be able to draw conclusions about "immigration hearing" (e.g., what liberal and conservative judges say about an "immigration hearing"). For this reason, it sometimes may make sense for you to analyze groups of words instead of unigrams. This tutorial will show you how to do NLP with groups of words, which we will call **phrases** or **multi-word expressions**. We will show how to (A) extract phrases from documents and (B) use these phrases for downstream analysis. ## High-level takeaways 1. **You can use phrases when you do NLP.** There are many existing tools and methods for extracting phrases (e.g. [PyATE](https://github.com/kevinlu1248/pyate)). In this tutorial, we will explore using phrases extracted via Python package [phrasemachine](https://github.com/slanglab/phrasemachine) which you can install using `pip install phrasemachine`. Phrasemachine is based on the method described in the paper [Bag of What](https://aclanthology.org/W16-5615.pdf). 2. **You can be creative and define phrases in a way that makes sense for your problem.** `phrasemachine` uses a grammar over part-of-speech tags to extract phrases. The particular phrasemachine patterns are often useful. But in your work, think of other kinds of phrasal patterns you might want to extract using regular expressions. For instance, if you are analyzing the political valance of economic theories, you might want to search economics papers for a pattern like "theory of \$ADJ\?(NOUN|PROPN)+" (e.g. 
"Theory of Monetary Policy"). 3. **Phrasemachine extracts discrete phrases, which comes with downsides and limitations**. Extracted discrete phrases are not distributed; so similar phrases get totally different discrete representations. Also, extracted discrete phrases are not contextual, meaning that representations do not reflect the different meaning of phrases in context. (An "immigration hearing" is different if it is an "EU immigration hearing" or an "immigration hearing in Texas"). Phrase-BERT (coming up!) addresses these real issues with phrasemachine, but does come with greater computational costs and engineering burdens. <jupyter_code>! wget https://zissou.infosci.cornell.edu/convokit/datasets/supreme-corpus/cases.jsonl -O cases.jsonl ! pip install phrasemachine ! pip install tqdm ! pip install convokit==2.5.2 # later versions seem to have some issue w/ a torch dependency. If you go to runtime-> restart runtime this seems to work (AH: Feb 15, 22) from convokit import Corpus, download # install the corpus here corpus = Corpus(filename=download("supreme-corpus")) # download the corpus<jupyter_output><empty_output><jupyter_text>#### Unigram bag of words Let's start with a single (short) document and break it into a unigram bag of words. It's easy to find packages for this online, but we will just use vanilla Python for this. A few notes: - We will define words using a whitespace delimiter below, but note there are also other better ways to do [tokenization](https://web.stanford.edu/~jurafsky/slp3/ed3book.pdf). - Note that a [bag](https://en.wikipedia.org/wiki/Multiset) is a set that allows duplicates; notice that the word `a` appears two times in the `unigram_bag_of_words`. - Note that each item in our bag is a unigram (single word)<jupyter_code>from collections import defaultdict document = "Solyndra received a loan guarantee. The Department of Energy offered the guarantee.".replace(".","") unigram_bag_of_words = defaultdict(int) for word in document.split(): unigram_bag_of_words[word] += 1 unigram_bag_of_words<jupyter_output><empty_output><jupyter_text>### Discussion: what are some phrases that get missed?### Adding phrases to the bag of words<jupyter_code>import phrasemachine from tqdm.notebook import tqdm text = "Solyndra received a loan guarantee. The Department of Energy offered the guarantee.".replace(".","") out = phrasemachine.get_phrases(text) out # here we are adding the phrases to the unigram bag of words enriched_bag_of_words = unigram_bag_of_words for phrase in out["counts"]: enriched_bag_of_words[phrase] = out['counts'][phrase] enriched_bag_of_words<jupyter_output><empty_output><jupyter_text>## Using phrases for downstream analysis Now that we know how to extract phrases using phrasemachine, we will now see how to use such phrases for downstream analysis. Specifically, we will analyze the ideological orientation of words and phrases in U.S. Supreme Court Oral Arguments. At a high level, we will ask: what kinds of things to liberal and conservative justices tend to bring up during oral arguments? What would you expect to liberals and convservatives to talk about?##### Corpus We will use the [`convokit`](https://convokit.cornell.edu/documentation/supreme.html) corpus of supreme court oral arguments. In this notebook, we will only examine comments from liberal and conservative justices from the years 2010-2019.In this analysis, we will investigate which phrases are used by liberal (L) and conservative (C) justices. 
So we need a mapping of justices to ideologies, which we construct manually below.<jupyter_code>judge2ideology = {'j__john_g_roberts_jr': "C", 'j__samuel_a_alito_jr': "C", 'j__ruth_bader_ginsburg': "L", 'j__sonia_sotomayor': "L", 'j__antonin_scalia': "L", 'j__stephen_g_breyer': "L", 'j__anthony_m_kennedy': "C", 'j__elena_kagan': "L", 'j__clarence_thomas': "C", 'j__neil_gorsuch': "C", 'j__brett_m_kavanaugh': "C" }<jupyter_output><empty_output><jupyter_text>We also build a set of all justices in the dataset.<jupyter_code>import json def get_justices(input_file="cases.jsonl"): '''Get names of all justices in the dataset''' all_justices = set() with open(input_file, "r") as inf: for j in inf: j = json.loads(j) if j["votes"] is not None: for justice in j["votes"].keys(): all_justices.add(justice) return all_justices all_justices = get_justices()<jupyter_output><empty_output><jupyter_text>The next step is to extract words and phrases for the justices. In computing this information, we do two things to make the computational requirements more managable: 1. We limit our analysis to court cases from 2010-2019, which is why you see `u.meta["case_id"][0:3] == "201"` below. 2. We also only extract phrases from the first 500 characters of the utterance.<jupyter_code>utterances = [] # build a list of the utterances we are interested in for u in tqdm(corpus.get_utterance_ids()): u = corpus.get_utterance(u) if u.speaker.id in all_justices and u.meta["case_id"][0:3] == "201": utterances.append(u)<jupyter_output><empty_output><jupyter_text>### Extracting phrases The code below extracts and counts phrases from liberal and conservative justices<jupyter_code>from collections import defaultdict # https://docs.python.org/3/library/collections.html#collections.defaultdict from tqdm.notebook import tqdm justice2phrases = defaultdict(lambda: defaultdict(int)) for u in tqdm(utterances): phrases = phrasemachine.get_phrases(u.text[0:500])["counts"] # roughly 97.5% are less than 500 chars, and runs way faster for p in phrases: # we can filter out some filler/stop phrases here, e.g. when the record notes laughter if "justice" not in p and "mr." not in p and "minutes" not in p and "laugher" not in p: justice2phrases[judge2ideology[u.speaker.id]][p] += phrases[p] phrasecounts = justice2phrases # this builds a dictionary of count of phrases by liberal/conservative judges<jupyter_output><empty_output><jupyter_text>### Extracting words The code below extracts and counts unigrams from liberal and conservative justices<jupyter_code>justice2words = defaultdict(lambda: defaultdict(int)) for u in tqdm(utterances): words = u.text[0:500].split() for word in words: # we can filter out some filler/stop phrases here, e.g. when the record notes laughter if "justice" not in word and "mr." not in word and "minutes" not in word and "laugher" not in word: justice2words[judge2ideology[u.speaker.id]][word] += 1 wordcounts = justice2words # this builds a dictionary of count of phrases by liberal/conservative judges<jupyter_output><empty_output><jupyter_text>## Analyzing word use Now that we have counted words and phrases from liberal and convservative justices, we will analyze differences in the political orientation of words and phrases. Specifically we will: - Compute statistics about how frequently liberal and conservative justices use particular words and - Display this information on a plot for analysis Our approach is based on the [Fightin' Words](http://languagelog.ldc.upenn.edu/myl/Monroe.pdf) method from Monroe et al. 
Specifically, we will use the word importance score from Section 3.2.2 of Fightin' Words. If you are curious, the paper describes other word importance scores.<jupyter_code>import pandas as pd def compute_normalize_counts(_countdict): normalized_counts = defaultdict(lambda: defaultdict(int)) for wing in _countdict.keys(): for p in _countdict[wing]: normalized_counts[wing][p] = _countdict[wing][p]/n[wing] return normalized_counts def compute_phrase_scores(normalized, _countdict): df = [] for wing in _countdict.keys(): for phrase in _countdict[wing]: df.append({"score": normalized["L"][phrase] - normalized["C"][phrase], "phrase": phrase, "count": _countdict["C"][phrase] + _countdict["L"][phrase]}) # http://languagelog.ldc.upenn.edu/myl/Monroe.pdf, 3.2.2 return pd.DataFrame(df).drop_duplicates() def getK(_df, k=20): if k > 0: return _df.sort_values("score")[0:k].copy() else: return _df.sort_values("score")[k:].copy() def get_top_K_df(counts): countdict = counts n = {} n["C"] = sum(countdict["C"].values()) n["L"] = sum(countdict["L"].values()) normalized_counts = compute_normalize_counts(countdict) df = compute_phrase_scores(normalized_counts, countdict) df = df[df["count"] < 200] # exclude high-count lexical items, roughly stop words # add a label field to the data frame for altair df["label"] = df["phrase"].apply(lambda x: x if x in tops["phrase"].to_list() else "") # add an abolute value of the score df["score_abs"] = df["score"].apply(lambda x: abs(x)) return pd.concat([getK(df, k=-20), getK(df, k=20)]) tops_phrases = get_top_K_df(phrasecounts) tops_words = get_top_K_df(wordcounts) tops_words["label"] = tops_words["phrase"] # add words to doc? import altair as alt import pandas as pd def make_plot(source): height = 1000 points = alt.Chart(source).mark_circle().encode( x='count:Q', y='score:Q', size='score_abs', color=alt.Color('score:Q', scale=alt.Scale(scheme='redyellowblue')) ).properties( width=1200, height=height ) text = alt.Chart(source).mark_text( align='left', baseline='middle', dx=7 ).encode( x='count:Q', y='score:Q', text='label' ).properties( width=1200, height=height ) return points + text make_plot(tops_phrases) make_plot(tops_words)<jupyter_output><empty_output><jupyter_text>### Discussion - Comparing the unigram plot to the plot with phrases, what do you notice? - Which plot gives you a clearer sense what justices tend to talk about. This is sometimes called being more "[interpretable](https://arxiv.org/abs/1702.08608)".<jupyter_code>import phrasemachine utterances = [] # build a list of the utterances we are interested in c = 0 def theory_phrases(_text): for phrase in phrasemachine.get_phrases(_text)["counts"]: if phrase[0:len("theory of")] == "theory of": yield phrase for u in tqdm(corpus.get_utterance_ids()): u = corpus.get_utterance(u) c += 1 if c == 100000: break if "theory of" in u.text: for phrase in theory_phrases(u.text): print(phrase) co <jupyter_output><empty_output>
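The score plotted above is simply the difference between a phrase's relative frequency among liberal justices and among conservative justices. A tiny self-contained example of that computation (the counts below are made up for illustration) is:

```python
# Hypothetical phrase counts per ideological wing
counts = {"L": {"immigration hearing": 12, "oil lease": 1},
          "C": {"immigration hearing": 3, "oil lease": 9}}

# Normalize by the total number of phrase tokens for each wing
n = {wing: sum(c.values()) for wing, c in counts.items()}
normalized = {wing: {p: cnt / n[wing] for p, cnt in c.items()} for wing, c in counts.items()}

for phrase in counts["L"]:
    score = normalized["L"][phrase] - normalized["C"].get(phrase, 0.0)
    print(phrase, round(score, 3))  # positive -> relatively more common among liberal justices
```

Note that, as written above, `compute_normalize_counts` reads `n` and `get_top_K_df` reads `tops` as globals left over from an earlier session; in a fresh run you would pass `n` into the normalization step and compute the top-k frame before using it to build the `label` column.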
Source: AbeHandler/AbeHandler.github.io, file "/Phrases.ipynb" (no license specified).
<jupyter_start><jupyter_text>Load the LogisticData.csv for this assignment and check if it has loaded successfully. In this excercise, we will use the same dataset for training and testing. After training the model on the best beta, we will see how well does our model perform.<jupyter_code>df = pd.read_csv("LogisticData.csv") #print df<jupyter_output><empty_output><jupyter_text>Next, preprocess the data. X should contain only the predictors, Y shuould be the reponse variable and beta should be a vector with length = the number of features and value = 0.<jupyter_code>cols_to_norm = [] #Normalize only if you are using gradient descent. Use standard deviation for normalization. j = ['Score1','Score2'] df[j] = df[j].apply(lambda x: (x - x.mean()) / (x.std())) df['beta0'] = 1.0 X = df.drop('Admitted',axis=1) Y = df['Admitted'] beta = np.matrix(np.zeros(shape=(1,3))) groundTruth = df['Admitted'] X = np.matrix(X.values) Y = np.matrix(Y.values)<jupyter_output><empty_output><jupyter_text>Define a sigmoid function and return the value tht has been calculated for z<jupyter_code>def sigmoid(z): ''' Here sigmoid value of 'z' needs to be returned. ''' sig = 1/(1+(np.exp(-z))) return sig<jupyter_output><empty_output><jupyter_text>Define the cost function for Logistic Regression. Remember to calculate the sigmoid values as well.<jupyter_code>def costFunction(beta, X, Y): ''' This function returns the value computed from the cost function. ''' a=sigmoid(X.dot(beta.T)) result = (1./len(X)) * (-(Y).dot(np.log(a)) - (1-Y).dot(np.log(1-a))) return result<jupyter_output><empty_output><jupyter_text>Define a gradient function that takes in beta, X and Y as parameters and returns the best betas and cost. <jupyter_code> def gradientDescent(X, Y, beta, alpha, iters): m=Y.size cost = np.zeros(shape=(iters, 1)) newcost = np.zeros(shape=(iters, 1)) for i in range(iters): pred = sigmoid(X.dot(beta.T)) error = pred - Y beta_s=beta.size cost[i, 0] = costFunction(beta, X, Y) for it in range(beta_s): t=np.matrix(X[:,it]) t.shape=(m,1) t = np.dot(error, t) beta.T[it][0] = beta.T[it][0] - ((alpha/len(X)) * t.sum()) return beta, cost #def gradient(beta, X, Y): #''' #This function returns the gradient calucated. #''' #for i in range(parameters): ##### #grad[i] = #m,n=np.shape(X) #grad=np.zeros(n) #Sq=sigmoid(np.dot(X,beta.T))-Y #for j in range(3): #t=np.multiply(Sq,X[:,j]) #grad[j]=np.sum(t)/m #return grad #the optimised betas are stored in the first index of the result variable #result = opt.fmin_tnc(func=costFunction , x0=beta , fprime =gradient , args=(X,Y) ) #print result<jupyter_output><empty_output><jupyter_text>Try out multiple values of 'alpha' and 'iters' so that you get the optimum result.<jupyter_code>#please try different values to see the results, but alpha=0.01 and iters=10000 are suggested. alpha = 0.01 iters = 10000 result = gradientDescent(X, Y, beta, alpha, iters) <jupyter_output>C:\Users\Niha\Anaconda2\lib\site-packages\ipykernel\__main__.py:6: RuntimeWarning: divide by zero encountered in log C:\Users\Niha\Anaconda2\lib\site-packages\ipykernel\__main__.py:5: RuntimeWarning: overflow encountered in exp <jupyter_text>Now , only define the gradient function that we can use in the SciPy's optimize module to find the optimal betas. Optimize the parameters given functions to compute the cost and the gradients. We can use SciPy's optimization to do the same thing. 
Define a variable result and complete the functions by adding the right parameters.Define a predict function that returns 1 if the probablity of the result from the sigmoid function is greater than 0.5, using the best betas and 0 otherwise.<jupyter_code>def predict(beta, X): ''' This function returns a list of predictions calculated from the sigmoid using the best beta. ''' print beta,X.shape probs = sigmoid(X.dot(beta.T)) #print probs return [1 if x > 0.5 else 0 for x in probs] <jupyter_output><empty_output><jupyter_text>Store the prediction in a list after calling the predict function with best betas and X.<jupyter_code>bestBeta = np.matrix(result[0]) predictions = predict(bestBeta, X) #print predictions<jupyter_output>[[ 2798.19456602 2497.28062093 556.03867103]] (100L, 3L) <jupyter_text>Calculate the accuracy of your model. The function should take the prediction and groundTruth as inputs and return the confusion matrix. The confusion matrix is of 'dataframe' type.<jupyter_code>def confusionMatrix(prediction, groundTruth): ''' Return the computed confusion matrix. ''' return pd.crosstab(groundTruth, prediction, rownames=['True'], colnames=['Predicted'], margins=False) <jupyter_output><empty_output><jupyter_text>Call the confusionMatrix function and print the confusion matrix as well as the accuracy of the model.<jupyter_code>#The final outputs that we need for this portion of the lab are conf and acc. Copy conf and acc in a .txt file. #Please write a SHORT report and explain these results. Include the explanations for both logistic and linear regression #in the same PDF file. groundTruth = pd.Series(groundTruth) prediction = pd.Series(predictions) conf = confusionMatrix(prediction, groundTruth) print conf M=np.array(conf) acc = M.trace()/len(X)*100 print 'Accuracy = '+str(acc)+'%'<jupyter_output>Predicted 0 1 True 0 37 3 1 9 51 Accuracy = 88.0%
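<jupyter_text>The commented-out SciPy route above can also be completed. The cell below is a hedged sketch (not part of the original assignment) using a vectorized cost/gradient pair with `scipy.optimize.minimize`; it assumes plain NumPy arrays (`X_arr` with a bias column, `y_arr` of 0/1 labels) rather than the `np.matrix` objects used above, and the names `cost_and_grad`, `X_arr`, and `y_arr` are illustrative.<jupyter_code>
import numpy as np
from scipy.optimize import minimize

def cost_and_grad(beta, X, y):
    """Negative log-likelihood of logistic regression and its gradient, vectorized."""
    p = 1.0 / (1.0 + np.exp(-X.dot(beta)))          # sigmoid(X beta)
    eps = 1e-12                                     # guard against log(0)
    cost = -np.mean(y * np.log(p + eps) + (1 - y) * np.log(1 - p + eps))
    grad = X.T.dot(p - y) / len(y)
    return cost, grad

# X_arr: (n, 3) float array including the bias column, y_arr: (n,) array of 0/1 labels
# res = minimize(cost_and_grad, x0=np.zeros(X_arr.shape[1]),
#                args=(X_arr, y_arr), jac=True, method="TNC")
# best_beta = res.x   # comparable to the betas found by gradientDescent above
<jupyter_output><empty_output>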
no_license
/LogisticRegression.ipynb
Niha97/Logistic-Regression---Gradient-Descent-
10
<jupyter_start><jupyter_text># Chapter 12## Setup and imports<jupyter_code>%matplotlib inline import warnings warnings.filterwarnings('ignore') from collections import OrderedDict import numpy as np import pandas as pd import statsmodels.api as sm import scipy.stats import matplotlib.pyplot as plt nhefs_all = pd.read_excel('NHEFS.xls')<jupyter_output>WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero <jupyter_text>Just a look at a couple basic details of the dataset<jupyter_code>nhefs_all.shape nhefs_all.columns<jupyter_output><empty_output><jupyter_text>## Section 12.1### Program 12.1"We restricted the analysis to NHEFS individuals with known sex, age, race, ..." (pg 149, margin)<jupyter_code>restriction_cols = [ 'sex', 'age', 'race', 'wt82', 'ht', 'school', 'alcoholpy', 'smokeintensity' ] missing = nhefs_all[restriction_cols].isnull().any(axis=1) nhefs = nhefs_all.loc[~missing] nhefs.shape<jupyter_output><empty_output><jupyter_text>We're going to add some columns to help calculate Table 12.1, and a `constant` column, which will be useful for modeling<jupyter_code>nhefs['constant'] = 1 nhefs['university'] = (nhefs.education == 5).astype('int') nhefs['inactive'] = (nhefs.active == 2).astype('int') nhefs['no_exercise'] = (nhefs.exercise == 2).astype('int')<jupyter_output><empty_output><jupyter_text>Average weight gains in quitters and non-quitters:<jupyter_code>ave_gain_quit = nhefs[nhefs.qsmk == 1].wt82_71.mean() ave_gain_noquit = nhefs[nhefs.qsmk == 0].wt82_71.mean() print("Average weight gain") print(" quitters: {:>0.1f} kg".format(ave_gain_quit)) print(" non-quitters: {:>0.1f} kg".format(ave_gain_noquit))<jupyter_output>Average weight gain quitters: 4.5 kg non-quitters: 2.0 kg <jupyter_text>Create a simple linear model to get a confidence interval on weight difference.<jupyter_code>ols = sm.OLS(nhefs.wt82_71, nhefs[['constant', 'qsmk']]) res = ols.fit() res.summary().tables[1] est = res.params.qsmk conf_ints = res.conf_int(alpha=0.05, cols=None) lo, hi = conf_ints[0]['qsmk'], conf_ints[1]['qsmk'] print(' estimate 95% C.I.') print('difference {:>6.2f} ({:>0.1f}, {:>0.1f})'.format(est, lo, hi))<jupyter_output> estimate 95% C.I. difference 2.54 (1.7, 3.4) <jupyter_text>Create Table 12.1 in the margin of pg 149.<jupyter_code>summaries = OrderedDict(( ('age', 'mean'), ('sex', lambda x: (100 * (x == 0)).mean()), ('race', lambda x: (100 * (x == 0)).mean()), ('university', lambda x: 100 * x.mean()), ('wt71', 'mean'), ('smokeintensity', 'mean'), ('smokeyrs', 'mean'), ('no_exercise', lambda x: 100 * x.mean()), ('inactive', lambda x: 100 * x.mean()) )) table = nhefs.groupby('qsmk').agg(summaries) table.sort_index(ascending=False, inplace=True) table = table.T table.index = [ 'Age, years', 'Men, %', 'White, %', 'University education, %', 'Weight, kg', 'Cigarettes/day', 'Years smoking', 'Little or no exercise, %', 'Inactive daily life, %' ] table.style.format("{:>0.1f}")<jupyter_output><empty_output><jupyter_text>## Section 12.2### Program 12.2We're going to be modeling with squared terms and some categorical features. Here we'll explicitly add squared features and dummy features to the data. 
In later chapters we'll use Statsmodels' formula syntax.Squared features:<jupyter_code>for col in ['age', 'wt71', 'smokeintensity', 'smokeyrs']: nhefs['{}^2'.format(col)] = nhefs[col] * nhefs[col]<jupyter_output><empty_output><jupyter_text>Dummy features:<jupyter_code>edu_dummies = pd.get_dummies(nhefs.education, prefix='edu') exercise_dummies = pd.get_dummies(nhefs.exercise, prefix='exercise') active_dummies = pd.get_dummies(nhefs.active, prefix='active') nhefs = pd.concat( [nhefs, edu_dummies, exercise_dummies, active_dummies], axis=1 )<jupyter_output><empty_output><jupyter_text>We're going to be creating a lot of IP weights from logistic regressions so a function will help reduce the work. The following function creates the denominators of the IP weights.<jupyter_code>def logit_ip_f(y, X): """ Create the f(y|X) part of IP weights from logistic regression Parameters ---------- y : Pandas Series X : Pandas DataFrame Returns ------- Numpy array of IP weights """ model = sm.Logit(y, X) res = model.fit() weights = np.zeros(X.shape[0]) weights[y == 1] = res.predict(X.loc[y == 1]) weights[y == 0] = (1 - res.predict(X.loc[y == 0])) return weights X_ip = nhefs[[ 'constant', 'sex', 'race', 'age', 'age^2', 'edu_2', 'edu_3', 'edu_4', 'edu_5', 'smokeintensity', 'smokeintensity^2', 'smokeyrs', 'smokeyrs^2', 'exercise_1', 'exercise_2', 'active_1', 'active_2', 'wt71', 'wt71^2' ]] denoms = logit_ip_f(nhefs.qsmk, X_ip) weights = 1 / denoms print('IP weights') print(' min: {:>5.2f} expected: 1.05'.format(weights.min())) print(' max: {:>5.2f} expected: 16.70'.format(weights.max())) print(' mean: {:>5.2f} expected: 2.00'.format(weights.mean())) fig, ax = plt.subplots(figsize=(8, 6)) ax.hist(weights, bins=20);<jupyter_output><empty_output><jupyter_text>Now, the main model<jupyter_code>y = nhefs.wt82_71 X = nhefs[['constant', 'qsmk']]<jupyter_output><empty_output><jupyter_text>Weighted least squares gives the right coefficients, but the standard error is off.<jupyter_code>wls = sm.WLS(y, X, weights=weights) res = wls.fit() res.summary().tables[1]<jupyter_output><empty_output><jupyter_text>GEE gives the right coefficients and better standard errors<jupyter_code>gee = sm.GEE( nhefs.wt82_71, nhefs[['constant', 'qsmk']], groups=nhefs.seqn, weights=weights ) res = gee.fit() res.summary().tables[1] est = res.params.qsmk conf_ints = res.conf_int(alpha=0.05, cols=None) lo, hi = conf_ints[0]['qsmk'], conf_ints[1]['qsmk'] print(' estimate 95% C.I.') print('theta_1 {:>6.2f} ({:>0.1f}, {:>0.1f})'.format(est, lo, hi))<jupyter_output> estimate 95% C.I. 
theta_1 3.44 (2.4, 4.5) <jupyter_text>Here's a simple check that there is no association between `sex` and `qsmk`.<jupyter_code>pd.crosstab(nhefs.sex, nhefs.qsmk, weights, aggfunc='sum')<jupyter_output><empty_output><jupyter_text>(This matches the R output, but the Stata output is different.)<jupyter_code>subset_indices = (nhefs.race == 0) & (nhefs.sex == 1) subset = nhefs.loc[subset_indices]<jupyter_output><empty_output><jupyter_text>Now a check for positivity<jupyter_code>crosstab = pd.crosstab(subset.age, subset.qsmk).sort_index() fig, ax = plt.subplots(figsize=(8, 6)) ax.axhline(0, c='gray') ax.plot(crosstab.index, crosstab[0], label='non-quitters') ax.plot(crosstab.index, crosstab[1], label='quitters') ax.set_xlabel('age', fontsize=14) ax.set_ylabel('count', fontsize=14) ax.legend(fontsize=12);<jupyter_output><empty_output><jupyter_text>We see that there are actually a few ages with zero counts<jupyter_code>crosstab.iloc[-10:]<jupyter_output><empty_output><jupyter_text>For a discussion on ages with zero counts, see Fine Point 12.2, pg 155.## Section 12.3"The effect estimate obtained in the pseudo-population created by weights $0.5 \, / \, f(A|L)$ is equal to that obtained in the pseudo-population created by weights $1 \, / \, f(A|L)$."<jupyter_code>gee = sm.GEE( nhefs.wt82_71, nhefs[['constant', 'qsmk']], groups=nhefs.seqn, weights=(0.5 * weights) ) res = gee.fit() res.summary().tables[1]<jupyter_output><empty_output><jupyter_text>"Second, we need to estimate Pr[A=1] for the numerator of the weights. We can obtain a nonparametric estimate by the ratio 403/1566 or, equivalently, by fitting a saturated logistic model for Pr[A=1] with an intercept and no covariates." pg 154<jupyter_code>qsmk = (nhefs.qsmk == 1) # option 1 qsmk_mean = qsmk.mean() qsmk_mean # option 2 lgt = sm.Logit(qsmk, nhefs.constant) res = lgt.fit() res.summary().tables[1] lgt_pred = res.predict()<jupyter_output><empty_output><jupyter_text>Check for equivalence<jupyter_code>equivalent = np.all(np.isclose(lgt_pred, qsmk_mean)) print('equivalent: {}'.format(equivalent))<jupyter_output>equivalent: True <jupyter_text>### Program 12.3Create stabilized IP weights. Shortcut: modify the IP weights already calculated.<jupyter_code>s_weights = np.zeros(nhefs.shape[0]) s_weights[qsmk] = qsmk.mean() * weights[qsmk] # `qsmk` was defined a few cells ago s_weights[~qsmk] = (1 - qsmk).mean() * weights[~qsmk] print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( s_weights.min(), s_weights.mean(), s_weights.max() ))<jupyter_output>Stabilized weights min mean max ------------------ 0.33 1.00 4.30 <jupyter_text>Refit the model from the last section, using the new weights<jupyter_code>gee = sm.GEE( nhefs.wt82_71, nhefs[['constant', 'qsmk']], groups=nhefs.seqn, weights=s_weights ) res = gee.fit() res.summary().tables[1] est = res.params.qsmk conf_ints = res.conf_int(alpha=0.05, cols=None) lo, hi = conf_ints[0]['qsmk'], conf_ints[1]['qsmk'] print(' estimate 95% C.I.') print('theta_1 {:>6.2f} ({:>0.1f}, {:>0.1f})'.format(est, lo, hi))<jupyter_output> estimate 95% C.I. theta_1 3.44 (2.4, 4.5) <jupyter_text>The estimate is the same as in the previous sectionWe can check again for no association between sex and qsmk in the the pseudo-population<jupyter_code>pd.crosstab(nhefs.sex, nhefs.qsmk, s_weights, aggfunc='sum')<jupyter_output><empty_output><jupyter_text>## Section 12.4### Program 12.4Subset the data to subjects that smoked 25 or fewer cigarettes per day at baseline. 
In this case, we can either obtain the subset from the original dataset, or we can obtain it from the reduced dataset that we've been using. I'll get it from the reduced subset, since it already contains dummy features we'll need.<jupyter_code># from original dataset intensity25 = nhefs_all.loc[ (nhefs_all.smokeintensity <= 25) & ~nhefs_all.wt82.isnull() ] intensity25.shape # from reduced dataset intensity25 = nhefs.loc[nhefs.smokeintensity <= 25] intensity25.shape<jupyter_output><empty_output><jupyter_text>Create the stabilized IP weights $SW^A = f(A) \, / \, f(A|L)$"we assumed that the density f(A|L) was normal (Gaussian) with mean $\mu = E[A|L]$ and variance $\sigma^2$. We then used a linear regression model to estimate the mean $E[A|L]$ and variance of residuals $\sigma^2$ for all combinations of values of L." pg 156 <jupyter_code>A = intensity25.smkintensity82_71 X = intensity25[[ 'constant', 'sex', 'race', 'edu_2', 'edu_3', 'edu_4', 'edu_5', 'exercise_1', 'exercise_2', 'active_1', 'active_2', 'age', 'age^2', 'wt71', 'wt71^2', 'smokeintensity', 'smokeintensity^2', 'smokeyrs', 'smokeyrs^2' ]] ols = sm.OLS(A, X) res = ols.fit() A_pred = res.predict(X) # i.e., E[A|L]<jupyter_output><empty_output><jupyter_text>The denominator is the distribution, $N(\mu, \sigma)$, evaluated at each point of $y = A$.<jupyter_code>fAL = scipy.stats.norm.pdf( A, # A A_pred, # mu = E[A|L] np.sqrt(res.mse_resid) # sigma )<jupyter_output><empty_output><jupyter_text>"We also assumed that the density f(A) in the numerator was normal."<jupyter_code>fig, ax = plt.subplots(figsize=(8, 6)) A.hist(bins=30, ax=ax); A.mean(), A.std() fA = scipy.stats.norm.pdf(A, A.mean(), A.std())<jupyter_output><empty_output><jupyter_text>Then the stabilized IP weights are<jupyter_code>sw = fA / fAL print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( sw.min(), sw.mean(), sw.max() ))<jupyter_output>Stabilized weights min mean max ------------------ 0.19 1.00 5.10 <jupyter_text>Now fit the marginal structural model<jupyter_code>y = intensity25.wt82_71 X = pd.DataFrame(OrderedDict(( ('constant', np.ones(y.shape[0])), ('A', A), ('A^2', A**2) ))) model = sm.GEE( y, X, groups=intensity25.seqn, weights=sw ) res = model.fit() res.summary().tables[1]<jupyter_output><empty_output><jupyter_text>To get the estimate and confidence interval for "no change", you can read off the values in the `constant` row above (because `A` and `A^2` will be zero). Getting Statmodels to calculate the estimate and confidence interval for when smoking increases by 20 cigarettes / day will take a couple extra steps. In Chapter 11, the regression result had a `get_prediction` method. The GEE result doesn't (yet?) 
have that _method_, so we'll use the hidden `get_prediction` _function_.<jupyter_code>from statsmodels.regression._prediction import get_prediction pred_inputs = [ [1, 0, 0], # no change in smoking intensity [1, 20, 20**2], # plus 20 cigarettes / day ] pred = get_prediction(res, exog=pred_inputs) summary = pred.summary_frame().round(1) summary[["mean", "mean_ci_lower", "mean_ci_upper"]]<jupyter_output><empty_output><jupyter_text>We can relabel the rows and columns to make this table a little nicer<jupyter_code>summary = summary[["mean", "mean_ci_lower", "mean_ci_upper"]] summary.index = ["no change", "+20 per day"] summary.columns = ["estimate", "CI lower", "CI upper"] summary<jupyter_output><empty_output><jupyter_text>Note: since the `get_predictions` function wasn't attached to the GEE regression result, it might not work correctly with other versions of the GEE model.### Program 12.5"if interested in the causal effect of quitting smoking A (1: yes, 0: no) on the risk of death D (1: yes, 0: no) by 1982, one could consider a _marginal structural logistic model_"<jupyter_code>model = sm.GEE( nhefs.death, nhefs[['constant', 'qsmk']], groups=nhefs.seqn, weights=s_weights, family=sm.families.Binomial() ) res = model.fit() res.summary().tables[1]<jupyter_output><empty_output><jupyter_text>Odd ratio is $\exp(\hat{\theta}_1)$<jupyter_code>est = np.exp(res.params.qsmk) conf_ints = res.conf_int(alpha=0.05, cols=None) lo = np.exp(conf_ints[0]['qsmk']) hi = np.exp(conf_ints[1]['qsmk']) print(' estimate 95% C.I.') print('odds ratio {:>6.2f} ({:>0.1f}, {:>0.1f})'.format(est, lo, hi))<jupyter_output> estimate 95% C.I. odds ratio 1.03 (0.8, 1.4) <jupyter_text>## Section 12.5### Program 12.6Create the numerator of the IP weights. Reuse the basic `weights` for the denominator.<jupyter_code>numer = logit_ip_f(nhefs.qsmk, nhefs[['constant', 'sex']]) sw_AV = numer * weights print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( sw_AV.min(), sw_AV.mean(), sw_AV.max() )) nhefs.shape nhefs['qsmk_and_female'] = nhefs.qsmk * nhefs.sex model = sm.WLS( nhefs.wt82_71, nhefs[['constant', 'qsmk', 'sex', 'qsmk_and_female']], weights=sw_AV ) res = model.fit(cov_type='cluster', cov_kwds={'groups': nhefs.seqn}) res.summary().tables[1]<jupyter_output><empty_output><jupyter_text>## Section 12.6### Program 12.7We're going back to the original dataset<jupyter_code>nhefs_all.shape<jupyter_output><empty_output><jupyter_text>We'll add features that were added to the reduced dataset that we've been usingAdd constant feature<jupyter_code>nhefs_all['constant'] = 1<jupyter_output><empty_output><jupyter_text>Add dummy features<jupyter_code>edu_dummies = pd.get_dummies(nhefs_all.education, prefix='edu') exercise_dummies = pd.get_dummies(nhefs_all.exercise, prefix='exercise') active_dummies = pd.get_dummies(nhefs_all.active, prefix='active') nhefs_all = pd.concat( [nhefs_all, edu_dummies, exercise_dummies, active_dummies], axis=1 )<jupyter_output><empty_output><jupyter_text>Add squared features<jupyter_code>for col in ['age', 'wt71', 'smokeintensity', 'smokeyrs']: nhefs_all['{}^2'.format(col)] = nhefs_all[col] * nhefs_all[col]<jupyter_output><empty_output><jupyter_text>We'll also add a feature to track censored individuals<jupyter_code>nhefs_all['censored'] = nhefs_all.wt82.isnull().astype('int')<jupyter_output><empty_output><jupyter_text>Create the IP weights for treatment<jupyter_code>X_ip = nhefs_all[[ 'constant', 'sex', 'race', 'edu_2', 'edu_3', 'edu_4', 'edu_5', 
'exercise_1', 'exercise_2', 'active_1', 'active_2', 'age', 'age^2', 'wt71', 'wt71^2', 'smokeintensity', 'smokeintensity^2', 'smokeyrs', 'smokeyrs^2' ]] ip_denom = logit_ip_f(nhefs_all.qsmk, X_ip) ip_numer = logit_ip_f(nhefs_all.qsmk, nhefs_all.constant) sw_A = (ip_numer / ip_denom) print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( sw_A.min(), sw_A.mean(), sw_A.max() ))<jupyter_output>Stabilized weights min mean max ------------------ 0.33 1.00 4.21 <jupyter_text>Now the IP weights for censoring<jupyter_code># same as previous, but with 'qsmk' added X_ip = nhefs_all[[ 'constant', 'sex', 'race', 'edu_2', 'edu_3', 'edu_4', 'edu_5', 'exercise_1', 'exercise_2', 'active_1', 'active_2', 'age', 'age^2', 'wt71', 'wt71^2', 'smokeintensity', 'smokeintensity^2', 'smokeyrs', 'smokeyrs^2', 'qsmk' ]] ip_denom = logit_ip_f(nhefs_all.censored, X_ip) ip_numer = logit_ip_f( nhefs_all.censored, nhefs_all[['constant', 'qsmk']] ) sw_C = ip_numer / ip_denom sw_C[nhefs_all.censored == 1] = 1 print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( sw_C.min(), sw_C.mean(), sw_C.max() ))<jupyter_output>Stabilized weights min mean max ------------------ 0.94 1.00 1.72 <jupyter_text>Now create the combined IP weights<jupyter_code>sw_AC = (sw_A * sw_C) print('Stabilized weights') print(' min mean max') print('------------------') print('{:>04.2f} {:>04.2f} {:>04.2f}'.format( sw_AC.min(), sw_AC.mean(), sw_AC.max() ))<jupyter_output>Stabilized weights min mean max ------------------ 0.35 1.00 4.09 <jupyter_text>Now model weight gain using the combined IP weights<jupyter_code>wls = sm.WLS( nhefs.wt82_71, nhefs[['constant', 'qsmk']], weights=sw_AC[nhefs_all.censored == 0] ) res = wls.fit(cov_type='cluster', cov_kwds={'groups': nhefs.seqn}) res.summary().tables[1]<jupyter_output><empty_output>
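<jupyter_text>Since the numerator/denominator pattern is repeated several times in this chapter, it can be handy to fold the two `logit_ip_f` calls into one helper. The cell below is only a sketch under the same assumptions as the notebook (binary treatment, design matrices that already contain the `constant` column); the function name is mine.<jupyter_code>
import numpy as np
import statsmodels.api as sm

def stabilized_ip_weights(treatment, X_denom, X_numer):
    """Stabilized IP weights SW^A = f(A) / f(A|L) for a binary treatment.
    treatment: 0/1 Series; X_denom: covariates L (incl. constant);
    X_numer: typically just the constant column (or constant plus V)."""
    def fitted_density(y, X):
        res = sm.Logit(y, X).fit(disp=0)
        p1 = res.predict(X)                      # Pr[A=1 | X]
        return np.where(y == 1, p1, 1 - p1)      # f(A = a_i | X_i)
    return fitted_density(treatment, X_numer) / fitted_density(treatment, X_denom)

# e.g. sw_A = stabilized_ip_weights(nhefs_all.qsmk, X_ip, nhefs_all[['constant']])
<jupyter_output><empty_output>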
no_license
/chapter12.ipynb
IanFla/causal_inference_python_code
43
<jupyter_start><jupyter_text># Read data<jupyter_code>data_all = pd.read_csv('communities.data', header=None) #the first 5 colums are not predictive data = data_all.values[:, 5: -1] label = data_all.values[:, -1].astype(np.float) #delete some colums from DataFrame #data.drop(colums=[1, 2, 3, 4]) #clean the data #convert string '?' to 'NaN' data = np.array([['NaN' if col=='?' else col for col in row] for row in data]) #convert string to corresponding number # data_training = data.iloc[:1495, :] # data_testing = data.iloc[1495:, :] data_training = data[:1495, :] data_testing = data[1495:, :] label_training = label[:1495] label_testing = label[1495:] <jupyter_output><empty_output><jupyter_text># Deal with the missing value<jupyter_code>si = Imputer(missing_values=np.nan, strategy='mean', axis=0) si.fit(data_training) train_imputed = si.transform(data_training) test_imputed = si.transform(data_testing) print(train_imputed) print(test_imputed)<jupyter_output>[[ 0.19 0.33 0.02 ..., 0.5 0.32 0.14 ] [ 0. 0.16 0.12 ..., 0.45188285 0. 0.19075314] [ 0. 0.42 0.49 ..., 0.45188285 0. 0.19075314] ..., [ 0.01 0.36 0.95 ..., 0.45188285 0. 0.19075314] [ 0.06 0.53 0.01 ..., 0.45188285 0. 0.19075314] [ 0.05 0.43 0.08 ..., 0. 0.51 0.18 ]] [[ 0. 0.2 0.03 ..., 0.45188285 0. 0.19075314] [ 0. 0.55 0.13 ..., 0.45188285 0. 0.19075314] [ 0. 0.52 0.04 ..., 0.45188285 0. 0.19075314] ..., [ 0.16 0.37 0.25 ..., 0. 0.91 0.28 ] [ 0.08 0.51 0.06 ..., 0. 0.22 0.18 ] [ 0.2 0.78 0.14 ..., 0.5 1. 0.13 ]] <jupyter_text># Correlation Matrix<jupyter_code># print(pd.DataFrame(train_imputed).corr()) # plt.figure(figsize=(8, 8)) # plt.matshow(pd.DataFrame(train_imputed).corr())<jupyter_output><empty_output><jupyter_text># Coeficient of Variation CV * cv = s/m where s denotes sample variance and m denotes sample mean. * how to derive valuable information from scatter plot? 
From the scatter plot between label and variable, we're able to infer the insignificant features, such as feature 6<jupyter_code>#variation method calculates the coefficient of variance in the form of #standard deviation / mean cv = variation(train_imputed) n_features = int(np.floor(np.sqrt(128))) # n_features = 12 highest_cv_features = train_imputed[:, np.argsort(cv)[-n_features:]] highest_cv_features_label = np.c_[highest_cv_features, label_training.reshape(-1, 1)] highest_cv_features_label = pd.DataFrame(highest_cv_features_label) scatter_matrix(highest_cv_features_label, figsize=(16,16)) highest_cv_features = pd.DataFrame(highest_cv_features) # print(highest_cv_features_label) scatter_matrix(highest_cv_features, figsize=(16,16)) <jupyter_output><empty_output><jupyter_text># Linear Regression Model<jupyter_code>cv = variation(train_imputed) n_features = int(np.floor(np.sqrt(128))) index_features = np.argsort(cv)[-n_features:] train_selected = train_imputed[:, index_features] test_selected = test_imputed[:, index_features] lr = LinearRegression() lr.fit(train_selected, label_training) label_predicted = lr.predict(test_selected) print(lr.coef_) # print(label_predicted) # print(label_testing) MSE = mean_squared_error(label_testing ,label_predicted) print('MSE %.5f' % MSE) r_square = lr.score(test_selected, label_testing) r_square<jupyter_output><empty_output><jupyter_text># LASSO and Ridge Regression the data is not normalized<jupyter_code># alpha = [0.1, 1, 10, 100] # alpha = np.arange(1, 100, 5) alpha = np.logspace(-4, 2, 15) lasso_lr = LassoCV(alphas=alpha, cv=10) lasso_lr_norm = LassoCV(alphas=alpha, normalize=True, cv=10) ridge_lr = RidgeCV(alphas=alpha, cv=10) classifiers = [lasso_lr, lasso_lr_norm, ridge_lr] clf_names = ['lasso lr', 'lasso lr normalized','ridge lr'] for clf, name in zip(classifiers, clf_names): clf.fit(train_selected, label_training) clf.predict(test_selected) label_predicted = clf.predict(test_selected) MSE = mean_squared_error(label_testing, label_predicted) print('%s MSE %.5f' % (name, MSE)) <jupyter_output>lasso lr MSE 0.02910 lasso lr normalized MSE 0.02931 ridge lr MSE 0.02919 <jupyter_text># PCAPCA is performed using selected training data and testing data<jupyter_code>def pca_train(X_train, X_test, y_train, y_test, n_component): pca = PCA(n_components=n_component) pca.fit(X_train) X_train = pca.transform(X_train) X_test = pca.transform(X_test) lr = LinearRegression() lr.fit(X_train, y_train) y_predicted = lr.predict(X_test) mse = mean_squared_error(y_test, y_predicted) return mse n_components = np.arange(1, n_features, 1) kf = KFold(n_splits=5) mse_cv = np.array([]) #cross valication on validation set for n_component in n_components: mse_eachfold = np.array([]) for train, test in kf.split(train_selected): X_train, X_test, y_train, y_test = train_selected[train, :], train_selected[test, :], label_training[train], label_training[test] # print(len(train), len(test)) mse = pca_train(X_train, X_test, y_train, y_test, n_component) mse_eachfold = np.append(mse_eachfold, mse) mse_cv = np.append(mse_cv, np.mean(mse_eachfold)) print(mse_cv) optimal_n_component = n_components[np.argmin(mse_cv)] print('optimal number of components %d' % optimal_n_component) print('optimal mse %.5f' % pca_train(train_selected, test_selected, label_training, label_testing, optimal_n_component)) <jupyter_output>[ 0.04414759 0.04296501 0.04295841 0.04291802 0.04158743 0.03894716 0.03873502 0.03866984 0.03754089 0.03774859] optimal number of components 9 optimal mse 0.02922 <jupyter_text>PCA 
is performed using original data<jupyter_code>n_components = np.arange(1, train_imputed.shape[1], 10) kf = KFold(n_splits=5) mse_cv = np.array([]) #cross valication on validation set for n_component in n_components: mse_eachfold = np.array([]) for train, test in kf.split(train_imputed): X_train, X_test, y_train, y_test = train_imputed[train, :], train_imputed[test, :], label_training[train], label_training[test] # print(len(train), len(test)) mse = pca_train(X_train, X_test, y_train, y_test, n_component) mse_eachfold = np.append(mse_eachfold, mse) mse_cv = np.append(mse_cv, np.mean(mse_eachfold)) print(mse_cv) optimal_n_component = n_components[np.argmin(mse_cv)] print('optimal number of components %d' % optimal_n_component) print('optimal mse %.5f' % pca_train(train_imputed, test_imputed, label_training, label_testing, optimal_n_component)) <jupyter_output>[ 0.03790095 0.02001046 0.02027903 0.01990308 0.01996262 0.01986969 0.01976377 0.01972806 0.01974705 0.01946261 0.01957579 0.01987876 0.02054914] optimal number of components 91 optimal mse 0.01839 <jupyter_text># XGBoost at each leaf, a l1-norm linear regression model is trained.<jupyter_code>import xgboost as xgb from sklearn.model_selection import cross_val_score train_imputed alphas = np.logspace(-4, 2, 15) # X_ = np.arange(10000).reshape(250, 40).astype(np.float) # y_ = np.arange(250) # print(X_.dtype) # clf = xgb.XGBRegressor(reg_alpha=1, reg_lambda=0) # clf = xgb.XGBClassifier() # clf.fit(X_, y_) # label_predicted = clf.predict(X_) # mse = mean_squared_error(label_predicted, y_) xgb_r = [] for alpha in alphas: clf = xgb.XGBRegressor(reg_alpha=alpha, reg_lambda=0) xgb_r.append(np.mean(cross_val_score(clf, train_imputed, label_training, cv=10))) # clf = xgb.XGBRegressor(reg_alpha=1, reg_lambda=0) # clf.fit(train_imputed, label_training) # label_predicted = clf.predict(test_imputed) # mse = mean_squared_error(label_predicted, label_testing) xgb_r<jupyter_output><empty_output><jupyter_text>* xscale function Alter the scale of x axis <jupyter_code>plt.figure() plt.xscale('log') plt.xlabel('alphas') plt.ylabel('R**2') plt.plot(alphas, xgb_r) plt.grid(True) plt.show() xgb_clf = xgb.XGBRegressor(reg_alpha=1, reg_lambda=0) xgb_clf.fit(train_imputed, label_training) label_predicted = xgb_clf.predict(test_imputed) mse = mean_squared_error(label_predicted, label_testing) mse<jupyter_output><empty_output>
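<jupyter_text>As an aside, the manual `KFold` loop used above to pick the number of principal components can also be written with a scikit-learn `Pipeline` plus `GridSearchCV`, which refits the PCA inside every fold in the same way. This is a hedged sketch, assuming the `train_imputed` and `label_training` arrays defined earlier; the grid's upper bound should match the number of columns in `train_imputed`.<jupyter_code>
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV

# PCA followed by ordinary least squares
pipe = Pipeline([("pca", PCA()), ("lr", LinearRegression())])

# search the number of retained components with 5-fold CV,
# scoring by negative mean squared error (higher is better)
param_grid = {"pca__n_components": np.arange(1, 122, 10)}   # adjust 122 to train_imputed.shape[1]
search = GridSearchCV(pipe, param_grid, cv=5, scoring="neg_mean_squared_error")

# search.fit(train_imputed, label_training)
# print(search.best_params_, -search.best_score_)
<jupyter_output><empty_output>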
no_license
/project_3/.ipynb_checkpoints/hw3-checkpoint.ipynb
Czzzzzzzz/ML_Projects
10
<jupyter_start><jupyter_text>Visual Question Answering (VQA)<jupyter_code>#installing packages (since i'm using google colab,i'm installing from notebook itself) #importing packages import warnings warnings.filterwarnings("ignore") import os import pandas as pd import numpy as np import json import seaborn as sns import tensorflow as tf %pylab inline import matplotlib.pyplot as plt import matplotlib.image as mpimg from wordcloud import WordCloud from google.colab import drive<jupyter_output>Populating the interactive namespace from numpy and matplotlib <jupyter_text>Mounting the Drive <jupyter_code>drive.mount('/content/drive/', force_remount=True)<jupyter_output>Mounted at /content/drive/ <jupyter_text>Variables<jupyter_code>currentDirectory = "/content/drive/My Drive/pcase_study_2/" os.chdir(currentDirectory) currentDirectory = "" dataDirectory = currentDirectory + "data/" imageDirectory = dataDirectory + "train2014/" question_file_path = dataDirectory + 'v2_OpenEnded_mscoco_train2014_questions.json' annotation_file_path = dataDirectory + 'v2_mscoco_train2014_annotations.json'<jupyter_output><empty_output><jupyter_text>#1. Data Tranformation## 1.1 Loading the Questions and Annotations<jupyter_code>with open(question_file_path, 'r') as f: questions = json.load(f) questions = questions["questions"] with open(annotation_file_path, 'r') as f: annotations = json.load(f) annotations = annotations["annotations"] print("Total Number Questions is : ",len(questions)) questions_df = pd.DataFrame(questions) questions_df.head(5) annotations_df = pd.DataFrame(annotations) annotations_df.head(5)<jupyter_output><empty_output><jupyter_text>## 1.2 Merging Questions and Annotations<jupyter_code>data = pd.merge(questions_df,annotations_df, how='inner', left_on=['image_id','question_id'], right_on = ['image_id','question_id']) data.head(5)<jupyter_output><empty_output><jupyter_text># 2. 
EDA <jupyter_code>imageDirectory = dataDirectory + "train2014/" os.chdir(imageDirectory) imageDirectory = "" index = np.random.randint(0,len(data))#263115 img_path = imageDirectory + 'COCO_train2014_' + '%012d.jpg' % (data['image_id'][index]) img=mpimg.imread(img_path) imgplot = plt.imshow(img) plt.axis('off') plt.show() print("*"*50) print("Question : " ,data['question'][index]) print("*"*50) print("Answer : ", data['multiple_choice_answer'][index]) print(img.shape)<jupyter_output><empty_output><jupyter_text>## 2.1 Images <jupyter_code>aggregations = {'question': 'count'} temp = pd.DataFrame(data.groupby(['image_id'],as_index=False).agg(aggregations)) num_of_ques_in_image = temp['question'].values print("Max number of questions on a image",max(num_of_ques_in_image)) print("Min number of questions on a image",min(num_of_ques_in_image)) print("Mean of questions on a image",np.mean(num_of_ques_in_image)) ax = sns.boxplot(y = 'question', data = temp) plt.title("boxplot of Number of questions on a iage") plt.show()<jupyter_output>Max number of questions on a image 275 Min number of questions on a image 3 Mean of questions on a image 5.3604846405663 <jupyter_text> Image that have maximum number of questions <jupyter_code>img_path = imageDirectory + 'COCO_train2014_' + '%012d.jpg' % (temp[temp['question'] == 275]['image_id'].values[0]) img=mpimg.imread(img_path) imgplot = plt.imshow(img) plt.axis('off') plt.show()<jupyter_output><empty_output><jupyter_text>## 2.2 Duplicate Questions on same Image<jupyter_code>aggregations = {'question_id':'count', 'multiple_choice_answer': lambda x: " || ".join(x)} temp = pd.DataFrame(data.groupby(['image_id','question'],as_index=False).agg(aggregations)).rename(columns={'question_id':'count'}) temp = temp[temp['count']>1] temp<jupyter_output><empty_output><jupyter_text>## 2.3 Question Type <jupyter_code>data.question_type.unique()<jupyter_output><empty_output><jupyter_text>There are many variety of question types, including "What is...", "Is there..","How many...", and "Does the...". A particularly interesting type of question is "What is..." 
questions, since they have a diverse set of possible answer.<jupyter_code>print("Number of unique Question type in dataset : ",len(data.question_type.unique())) def getFrequnctDict(data,column,isJoin=False): column_frequency = {} for _row in data[column]: if isJoin: _row = "_".join(_row.split()) if(column_frequency.get(_row,-1) > 0): column_frequency[_row] += 1 else: column_frequency[_row] = 1 return column_frequency def lineChart(data,column,top=20,isJoin=False): column_frequncy = getFrequnctDict(data,column,isJoin) sort_column_frequncy = sorted(list(column_frequncy.items()),key = lambda x: x[1],reverse=True) total_samples = len(data) plt.plot([x[1]for x in sort_column_frequncy[:top]]) i=np.arange(top) plt.title("Frequency of top " + str(top) + " " + column ) plt.xlabel("Tags") plt.ylabel("Counts") plt.xticks(i,[x[0] for x in sort_column_frequncy[:top]]) plt.xticks(rotation=90) plt.show() return sort_column_frequncy def plotWordCloud(data,column,isJoin=False): column_frequncy = getFrequnctDict(data,column,isJoin) #https://www.geeksforgeeks.org/generating-word-cloud-python/ wordcloud = WordCloud(width = 800, height = 800, background_color ='white', stopwords = None, min_font_size = 10).generate_from_frequencies(column_frequncy) # plot the WordCloud image plt.figure(figsize = (8, 8), facecolor = None) plt.imshow(wordcloud) plt.axis("off") plt.tight_layout(pad = 0) plt.title("WordCloud on "+ column) plt.show() plotWordCloud(data, 'question_type') question_type_frequncy = lineChart(data, 'question_type', top = 30) for _type,_count in question_type_frequncy[:10]: print("Percentage of '" + _type + "' Type of Questions in Dataset is ", str(100*_count/len(data)) )<jupyter_output>Percentage of 'how many' Type of Questions in Dataset is 9.541032592161926 Percentage of 'is the' Type of Questions in Dataset is 7.8707490811412555 Percentage of 'what' Type of Questions in Dataset is 7.7988628911769275 Percentage of 'what color is the' Type of Questions in Dataset is 6.301196375493795 Percentage of 'what is the' Type of Questions in Dataset is 5.521490365222408 Percentage of 'none of the above' Type of Questions in Dataset is 3.8248410729295537 Percentage of 'is this' Type of Questions in Dataset is 3.7056316858100264 Percentage of 'is this a' Type of Questions in Dataset is 3.610985291499627 Percentage of 'what is' Type of Questions in Dataset is 3.0559517934364977 Percentage of 'what kind of' Type of Questions in Dataset is 2.522101059814268 <jupyter_text>As we can see from above plots "how many", "is the", "what", "what is color the", "what is the" are most frequent question types Among 65 diffrent question types "how many" type of questions covers 9.5% of the data## 2.4 Answers <jupyter_code>data['answer_type'].unique() answer_type_frequncy = lineChart(data, 'answer_type', top = 3) for _type,_count in answer_type_frequncy: print("Percentage of '" + _type + "' Type of Answers in Dataset is ", str(100*_count/len(data)) )<jupyter_output>Percentage of 'other' Type of Answers in Dataset is 49.41195293820717 Percentage of 'yes/no' Type of Answers in Dataset is 37.60661803644788 Percentage of 'number' Type of Answers in Dataset is 12.981429025344953 <jupyter_text> As we can see just yes/no question covers almost 38% of the data <jupyter_code>sns.countplot(data["multiple_choice_answer"].apply(lambda x: len(x.split())).values) plt.title("Number of words in Answers vs Distrubution") plt.xlabel("Number of words in Answers") plt.ylabel("Distrubution") plt.show()<jupyter_output><empty_output><jupyter_text>Most 
answers consist of a single word, with the distribution of answers containing one, two, or three words, respectively being 89.32%, 6.91%, and 2.74% ## 2.5 Question <jupyter_code>sns.countplot(data["question"].apply(lambda x: len(x.split())).values) plt.title("Length of the questions vs Distrubution") plt.xlabel("Length of the questions") plt.ylabel("Distrubution") plt.show()<jupyter_output><empty_output><jupyter_text>Most of the questions have the length between 4 to 10.## 2.6 Question Type and Answer<jupyter_code>#https://stackoverflow.com/questions/47600818/python-pandas-groupby-sum-and-concatenate-strings temp = data top_question = [x[0] for x in question_type_frequncy[:50]] top_aswers = [x[0] for x in answer_frequncy[:50]] #temp = data[(data['question_type'].isin(top_question) | data['multiple_choice_answer'].isin(top_aswers))] aggregations = {'question': 'count'} temp = pd.DataFrame(temp.groupby(['question_type','multiple_choice_answer'],as_index=False).agg(aggregations)) temp = temp[temp['question']>=10] temp = temp.pivot(index='question_type', columns='multiple_choice_answer', values='question') ax = temp.loc[:,top_aswers].plot.bar(stacked=True, figsize=(18,7)) ax.legend(ncol = 7) plt.title('Question Type vs Answers') plt.show()<jupyter_output><empty_output><jupyter_text> We can see that a number of question types, such as "Is the...", "Are...", and "Does..." are typically answered using "yes" and "no" as answers. Other questions such as "What is..." and "What type..." have a rich diversity of responses. Other question types such as "What color... " or "Which..." have more specialized responses,such as colors, or "left" and "right".## 2.7 Question Type vs Answer<jupyter_code>fig = plt.figure(figsize=(80,30)) fig.tight_layout() count = 1 colorCodes = [ "#E18719", "#287819", "#2D6EAA", "#E6AF23", "#666666","#724D8D", "#EAAB5E", "#73A769","#93785F", "#C97B7B", "#81A8CC", "#EDC765", "#858585","#957AA9", "#F3CFA3","#B4D0AF", "#BEADA0", "#E4BDBD", "#ABC5DD", "#F4DB9C", "#A3A3A3"] for _type,_ in question_type_frequncy[:12]: percentage = str(round((len(data[data['question_type']==_type])/len(data))*100,1))+'%' plt.subplot(4, 3, count) temp = data[data['question_type']==_type] ax = temp['multiple_choice_answer'].value_counts()[:10][::-1].plot(kind='barh', figsize=(20,15),color=colorCodes[count-1], fontsize=13) ax.set_alpha(0.8) ax.set_title("Question Type: '" + _type + "' (" + percentage + ") vs Answer" , fontsize=18) ax.set_ylabel("Answers", fontsize=18) ax.get_xaxis().set_visible(False) for i in ax.patches: ax.text(i.get_width()/2, i.get_y(), str(round((i.get_width()/len(temp))*100, 2))+'%' + "(" + str(round((i.get_width()/len(data))*100, 2))+'%' +")", fontsize=10,color='black') count += 1 fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>## 2.8 Answer vs Question Type<jupyter_code>fig = plt.figure() fig.tight_layout() count = 1 colorCodes = [ "#E18719", "#287819", "#2D6EAA", "#E6AF23", "#666666","#724D8D", "#EAAB5E", "#73A769","#93785F", "#C97B7B", "#81A8CC", "#EDC765", "#858585","#957AA9", "#F3CFA3","#B4D0AF", "#BEADA0", "#E4BDBD", "#ABC5DD", "#F4DB9C", "#A3A3A3"] answer_frequncy = sorted(list(getFrequnctDict(data,'multiple_choice_answer').items()),key = lambda x: x[1],reverse=True) for _type,_ in answer_frequncy[:12]: percentage = str(round((len(data[data['multiple_choice_answer']==_type])/len(data))*100,1))+'%' plt.subplot(4, 3, count) temp = data[data['multiple_choice_answer']==_type] ax = temp['question_type'].value_counts()[:10][::-1].plot(kind='barh', 
figsize=(20,15),color=colorCodes[count-1], fontsize=13) ax.set_alpha(0.8) ax.set_title("Answer: '" + _type + "' (" + percentage + ") vs Question Type" , fontsize=18) ax.set_ylabel("Question Type", fontsize=18) ax.get_xaxis().set_visible(False) for i in ax.patches: ax.text(i.get_width()/2, i.get_y(), str(round((i.get_width()/len(temp))*100, 2))+'%' + "(" + str(round((i.get_width()/len(data))*100, 2))+'%' +")", fontsize=14,color='black') count += 1 fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>## 2.9 checking if acutal answer is same as persons answers <jupyter_code>def getPeopleAnswer(answers): answers_dict = {} score_dict = { 'yes' : 3, 'maybe' : 2, 'no' : 1 } for _answer in answers: score = score_dict[_answer['answer_confidence']] if answers_dict.get(_answer['answer'],-1) != -1 : answers_dict[_answer['answer']] += score else: answers_dict[_answer['answer']] = score return sorted(list(answers_dict.items()),key = lambda x: x[1],reverse=True)[0][0] data['derived_answer'] = data["answers"].apply(lambda x: getPeopleAnswer(x)) data[ data['derived_answer'] != data['multiple_choice_answer']]<jupyter_output><empty_output><jupyter_text># 3. Saving final Data <jupyter_code>data.to_csv(dataDirectory + 'data.csv')<jupyter_output><empty_output>
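<jupyter_text>The confidence-weighted vote in `getPeopleAnswer` can be written a little more compactly with `collections.Counter`. The sketch below is illustrative only: it assumes the same `answers` structure (a list of dicts with `answer` and `answer_confidence` keys) and the same 3/2/1 scores, and ties may be broken differently than by the explicit `sorted` call above.<jupyter_code>
from collections import Counter

CONFIDENCE_SCORE = {'yes': 3, 'maybe': 2, 'no': 1}

def weighted_answer(answers):
    """Pick the annotator answer with the highest confidence-weighted vote."""
    votes = Counter()
    for a in answers:
        votes[a['answer']] += CONFIDENCE_SCORE[a['answer_confidence']]
    return votes.most_common(1)[0][0]

# data['derived_answer'] = data['answers'].apply(weighted_answer)
<jupyter_output><empty_output>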
no_license
/2_Data_preprocessing_EDA.ipynb
harsha977/Visual-Question-Answering-With-Hierarchical-Question-Image-Co-Attention
19
<jupyter_start><jupyter_text># Working with Categorical and Numerical Data### Using Label Encoding and One Hot Encoding for categorical data; apply scaling to numeric data<jupyter_code>import pandas as pd print(pd.__version__) exam_data = pd.read_csv('../data/exams.csv', quotechar = '"') exam_data math_average = exam_data['math score'].mean() reading_average = exam_data['reading score'].mean() writing_average = exam_data['writing score'].mean() print('Math Avg: ', math_average) print('Reading Avg: ', reading_average) print('Writing Avg: ', writing_average)<jupyter_output>Math Avg: 65.06 Reading Avg: 67.28 Writing Avg: 66.47 <jupyter_text>## Data Standardization:#### Apply scaling to the test scores to express them in terms of z-scores #### A z-score expresses a value as the number of standard deviations from the mean #### The effect is to give a score which is relative to the distribution of values for that column<jupyter_code>from sklearn import preprocessing exam_data[['math score']] = preprocessing.scale(exam_data[['math score']]) exam_data[['reading score']] = preprocessing.scale(exam_data[['reading score']]) exam_data[['writing score']] = preprocessing.scale(exam_data[['writing score']]) exam_data math_average = exam_data['math score'].mean() reading_average = exam_data['reading score'].mean() writing_average = exam_data['writing score'].mean() print('Math Avg: ', math_average) print('Reading Avg: ', reading_average) print('Writing Avg: ', writing_average)<jupyter_output>Math Avg: -1.5693175925424186e-16 Reading Avg: -1.4432899320127036e-16 Writing Avg: 6.217248937900877e-17 <jupyter_text>## Label Encoding<jupyter_code>le = preprocessing.LabelEncoder() exam_data['gender'] = le.fit_transform(exam_data['gender'].astype(str)) exam_data.head() le.classes_<jupyter_output><empty_output><jupyter_text>## One Hot Encoding### One-Hot Encoding: * Use when there is no meaningful comparison between values in the column * Creates a new column for each unique value for the specified feature in the data set<jupyter_code>pd.get_dummies(exam_data['race/ethnicity']) exam_data.head() exam_data = pd.get_dummies(exam_data, columns=['parental level of education', 'lunch', 'test preparation course']) exam_data.head()<jupyter_output><empty_output>
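<jupyter_text>To make the z-score idea concrete, a small hand computation can be checked against `preprocessing.scale`; the numbers below are made up purely for illustration.<jupyter_code>
import numpy as np
from sklearn import preprocessing

scores = np.array([55.0, 65.0, 70.0, 80.0, 90.0])       # made-up test scores

# z-score by hand: (value - mean) / standard deviation
manual_z = (scores - scores.mean()) / scores.std()      # ddof=0, same convention as scale()

# preprocessing.scale centers and divides by the same (biased) standard deviation
sklearn_z = preprocessing.scale(scores)

print(np.allclose(manual_z, sklearn_z))                 # True
print(manual_z.round(2))
<jupyter_output><empty_output>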
no_license
/Module 1 - Processing Data/code/.ipynb_checkpoints/m1-demo1-CategoricalAndNumericData-checkpoint.ipynb
tehmeerali786/Building-Machine-Learning-Models
4
<jupyter_start><jupyter_text>### Import each table and read it <jupyter_code>#read "Employee" sql and clean the data employees = pd.read_sql('SELECT * FROM "Employees"',connection) #rename them to read easily employees_rename = employees.rename(columns={"emp_no": "Employee Number", "emp_title":"Title ID", "bith_date": "Birth Date", "first_name" : "First Name", "last_name": "Last Name", "sex": "Sex", "hire_date": "Hire Date"}) employees_rename.head() #read "Departments_Employees" sql and clean the data dep_emp = pd.read_sql('SELECT * FROM "Departments_Employees"',connection) #rename them to read easily dep_emp_rename = dep_emp.rename(columns={"emp_no": "Employee Number", "dept_no":"Department_Number"}) dep_emp_rename.head() #read each sql file salaries = pd.read_sql('SELECT * FROM "Salaries"',connection) #rename them to read easily salaries_rename = salaries.rename(columns={"emp_no": "Employee Number", "salary":"Salaries"}) salaries_rename.head() #read each sql file titles = pd.read_sql('SELECT * FROM "Titles"',connection) titles #rename them to read easily titles_rename = titles.rename(columns={"title_id":"Title ID", "title": "Title"}) titles_rename.head() #read each sql file departments = pd.read_sql('SELECT * FROM "Departments"',connection) #rename them to read easily departments_rename = departments.rename(columns={"dept_no":"Department_Number", "dept_name": "Department Name"}) departments_rename.head() #read each sql file dep_manager = pd.read_sql('SELECT * FROM "Department_Manager"',connection) #rename them to read easily dep_manager_rename = dep_manager.rename(columns={"dept_no":"Department Number","emp_no": "Employee Number"}) dep_manager_rename.head()<jupyter_output><empty_output><jupyter_text>### Histogram - common salary ranges<jupyter_code>#Create a histogram to visualize the most common salary ranges for employees. x = salaries_rename['Salaries'] plt.hist(x, bins = 20, color = 'b') plt.xlabel("Salary") plt.ylabel("Number of Employees") plt.show() plt.savefig("The Most Common Salary Ranges.png")<jupyter_output><empty_output><jupyter_text>### Combine the data<jupyter_code>#Create a bar chart of average salary by title. # To do that, we need salary and title in the same dataframe # titles, employees, salaries - they need to be merged titles_employees = pd. merge (titles_rename, employees_rename, on = "Title ID", how = "inner") # salaries_titles = pd.merge(salaries_clean, titles_clean, on="Employee Number", how="inner") titles_employees merged_data = pd. merge (titles_employees, salaries_rename, on = "Employee Number", how = "inner") # salaries_titles = pd.merge(salaries_clean, titles_clean, on="Employee Number", how="inner") merged_data #Use 'groupby' to set the titles as the first column merged_data_groupby = merged_data.groupby("Title").mean() merged_data_groupby # Drop "Employee Number" which is unnecessary titles_salaries = merged_data_groupby.drop(columns = "Employee Number") titles_salaries #a bar chart of average salary by title. titles_salaries.plot.bar(align="center", color=['lightblue'], title = "Average Salary by Title", edgecolor='darkgray', rot = 45) plt.grid(True) plt.ylim((0,70000)) plt.savefig("Average Salary by Title.png")<jupyter_output><empty_output><jupyter_text>### Epilogue#Evidence in hand, you march into your boss's office and present the visualization. With a sly grin, your boss thanks you for your work. On your way out of the office, you hear the words, "Search your ID number." 
You look down at your badge to see that your employee ID number is 499942.<jupyter_code>#Find the employee ID number employees_rename.loc[employees_rename["Employee Number"] == 499942] #April Fools? LOL <jupyter_output><empty_output>
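<jupyter_text>The same average-salary-by-title table could also be computed directly in SQL and read back with `read_sql`, instead of merging and grouping in pandas. This is only a sketch, assuming the quoted table names and raw column names (`emp_no`, `emp_title`, `title_id`, `title`, `salary`) shown above, the same open `connection`, and pandas imported as `pd`.<jupyter_code>
query = '''
SELECT t.title       AS "Title",
       AVG(s.salary) AS "Average Salary"
FROM "Employees" e
JOIN "Titles"    t ON e.emp_title = t.title_id
JOIN "Salaries"  s ON e.emp_no    = s.emp_no
GROUP BY t.title
ORDER BY "Average Salary" DESC;
'''
avg_salary_by_title = pd.read_sql(query, connection)
avg_salary_by_title
<jupyter_output><empty_output>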
no_license
/Visualization_postgres_final.ipynb
miheep8938/postgresql-challenge
4
<jupyter_start><jupyter_text># DBSCANThis algorithm works by picking a point and finding all points within a distance $\epsilon$ (epsilon). If the number of neighbors is less than __min samples__, the point is labeled as _noise_. If the number is at least __min samples__, the point is labeled as a _core point_ and its neighbors are visited. A neighbor that has not been assigned yet gets the current cluster label, and if that neighbor is itself a core point, its neighbors are visited in turn. Repeat until all of the points have been assigned.<jupyter_code>import mglearn mglearn.plots.plot_dbscan() # using DBSCAN with synthetic datasets from sklearn.cluster import DBSCAN from sklearn.datasets import make_moons from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt X, y = make_moons(n_samples=200, noise=.05, random_state=0) # rescale data to zero mean and unit variance scaler = StandardScaler().fit(X) X_scaled = scaler.transform(X) dbscan = DBSCAN() # DBSCAN cannot assign new data to existing clusters, # so it only provides a fit_predict method cluster = dbscan.fit_predict(X_scaled) # plotting plt.scatter(X_scaled[:,0], X_scaled[:, 1], c=cluster, s=60) plt.xlabel("feature 0") plt.ylabel("feature 1")<jupyter_output><empty_output>
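<jupyter_text>To connect the description above to code, here is a brute-force, didactic sketch of the algorithm itself (quadratic time, no spatial index, so not a replacement for `sklearn.cluster.DBSCAN`); `-1` marks noise, and the default `eps=0.5`, `min_samples=5` mirror scikit-learn's defaults. Border points reachable from more than one cluster may be assigned differently than by scikit-learn.<jupyter_code>
import numpy as np

def dbscan_sketch(X, eps=0.5, min_samples=5):
    """Toy DBSCAN: returns an array of cluster labels, with -1 for noise."""
    n = len(X)
    labels = np.full(n, -2)                    # -2 = not visited yet
    cluster = -1
    for i in range(n):
        if labels[i] != -2:                    # already clustered or flagged as noise
            continue
        neighbors = np.where(np.linalg.norm(X - X[i], axis=1) <= eps)[0]
        if len(neighbors) < min_samples:
            labels[i] = -1                     # noise (may later become a border point)
            continue
        cluster += 1                           # i is a core point: start a new cluster
        labels[i] = cluster
        queue = list(neighbors)
        while queue:
            j = queue.pop()
            if labels[j] == -1:                # noise reachable from a core point
                labels[j] = cluster            #   is relabeled as a border point
            if labels[j] != -2:
                continue
            labels[j] = cluster
            j_neighbors = np.where(np.linalg.norm(X - X[j], axis=1) <= eps)[0]
            if len(j_neighbors) >= min_samples:
                queue.extend(j_neighbors)      # j is also a core point: keep expanding
    return labels

# labels = dbscan_sketch(X_scaled)   # same cluster structure as fit_predict, up to label order
<jupyter_output><empty_output>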
no_license
/chapter3-dbscan.ipynb
miqdude/introductiontomachinelearning
1
<jupyter_start><jupyter_text>## Introduction to Pandas ## We will learn: a) What is a DataFrame? b) How to read data from clipboard to a DataFrame ? c) How to read data from file to a DataFrame ? d) Let's code together - to understand object types.<jupyter_code>#Remember these will become the standard imports from now on! import numpy as np from pandas import Series,DataFrame import pandas as pd #Now we'll learn DataFrames #Let's get some data to play with. How about the NFL? import webbrowser website = 'C:\Users\GAUTHAM\Desktop\anaconda\train.csv' webbrowser.open(website) #Copy and read to get data nfl_frame = pd.read_clipboard() type(nfl_frame) #Show nfl_frame # We can grab the column names with .columns nfl_frame.columns nfl_frame.head(3) nfl_frame.tail(1)<jupyter_output><empty_output><jupyter_text>### Read data from a CSV file ###<jupyter_code>trainingData = pd.read_csv("train.csv") trainingData.head(3) # LET'S DO IT TOGETHER: # a) Try printing out the last 15 values in the trainingData. trainingData.tail(1) # b) Print the 'PassengerId' column where the PassengerId is less than 10 # Hint: DataFrame can accept conditions such as: PassengerId < 10. series = trainingData["PassengerId"] trainingData[series<10] # c) Try printing out a non-existent column. trainingData["test"] # d) Try adding a Series say Alive/Dead to the DataFrame trainingData["Alive/Dead"] = 'Dead' trainingData.head() aliveDead = Series(["Alive","Alive"],index=[4,0]) trainingData["Alive/Dead"] = aliveDead trainingData.head() # Example: # stadiums = Series(["Levi's Stadium","AT&T Stadium"],index=[4,0]) # nfl_frame['Stadium']=stadiums type(trainingData) # e) Check what these return: # 1)type(trainingData) - note the datatype # 2)trainingData.info - note the starting index type(trainingData) trainingData.info # f) Try out: # 1) M = trainingData.as_matrix() # 2) Check the datatype of M # 3) Print trainingData[0,0] and trainingData[0] # 4) Check the datatype of trainingData[0] # g) Try out: # 1) Print trainingData.iloc[0] # 2) Print trainingData.ix[0] # 3) Check the datatype of trainingData.ix[0] M = trainingData.as_matrix() type(M)<jupyter_output><empty_output>
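<jupyter_text>One caveat for exercises (f) and (g): `DataFrame.as_matrix()` and the `.ix` indexer were deprecated and later removed in newer pandas releases. The cell below is a hedged sketch of the current equivalents, assuming `trainingData` was loaded from `train.csv` as above.<jupyter_code>
M = trainingData.to_numpy()                     # replaces trainingData.as_matrix()
print(type(M))                                  # numpy.ndarray

first_row_pos = trainingData.iloc[0]            # position-based, replaces trainingData.ix[0]
first_row_lbl = trainingData.loc[0]             # label-based (index label 0)
print(type(first_row_pos))                      # pandas.Series

small_ids = trainingData.loc[trainingData["PassengerId"] < 10, ["PassengerId"]]
small_ids
<jupyter_output><empty_output>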
no_license
/Workshop_3_Reading_Data_using_Pandas.ipynb
Gdarkstar/data-cleaning
2
<jupyter_start><jupyter_text># Guided Project: Building a Spam Filter with Naive Bayes## IntroductionOur goal for this project is to create a spam filter for SMS messages which has an accuracy over 80%. To do that, we'll use the multinomial Naive Bayes algorithm along with a dataset of 5,572 SMS messages that were already classified by humans.## The DataThe dataset we will be using was put together by Tiago A. Almeida and José María Gómez Hidalgo, and it can be downloaded from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/sms+spam+collection). The data collection process is described in more detail on this [page](http://www.dt.fee.unicamp.br/~tiago/smsspamcollection/#composition), where we can also find some of the authors' papers.## Initial Data ExplorationLet's start by reading in the dataset.<jupyter_code>import pandas as pd import warnings warnings.filterwarnings("ignore") sms_spam = pd.read_csv('SMSSpamCollection', sep='\t', names=['Label', 'SMS']) print(sms_spam.shape) sms_spam.head()<jupyter_output>(5572, 2) <jupyter_text>Let's find out what percentage of the messages is spam and what percentage is ham ("ham" means non-spam).<jupyter_code>sms_spam["Label"].value_counts(normalize=True)*100<jupyter_output><empty_output><jupyter_text>About 87% of the messages are ham and the remaining 13% are spam.## Splitting our dataset for training and testingOnce our spam filter is done, we'll need to test how well it classifies new messages. So before creating it, we split our dataset into two categories: * A **training set**, which we'll use to "train" the computer how to classify messages. * A **test set**, which we'll use to test how well the spam filter classifies new messages. We're going to keep 80% of our dataset for training, and 20% for testing (we want to train the algorithm on as much data as possible, but we also want to have enough test data). The dataset has 5,572 messages, which means that: * The training set will have 4,458 messages (about 80% of the dataset). * The test set will have 1,114 messages (about 20% of the dataset). For this project, our goal is to create a spam filter that classifies new messages with an accuracy greater than 80%, so we expect that more than 80% of the new messages will be classified correctly as spam or ham (non-spam). Let's create a training and a test set! 
We start by randomizing the entire dataset.<jupyter_code>randomized_data = sms_spam.sample(frac=1, random_state=1)<jupyter_output><empty_output><jupyter_text>We then split the randomized dataset into a training and a test set.<jupyter_code># Calculate the index for split training_test_index = round(len(randomized_data)*0.8) # Training/Test split training_set = randomized_data[:training_test_index].reset_index(drop=True) print(training_set.shape) test_set = randomized_data[training_test_index:].reset_index(drop=True) print(test_set.shape)<jupyter_output>(4458, 2) (1114, 2) <jupyter_text>Next, we calculate the percentages of spam and ham in each of these sets to see if they are representative of the full dataset.<jupyter_code>training_set["Label"].value_counts(normalize=True)*100 test_set["Label"].value_counts(normalize=True)*100<jupyter_output><empty_output><jupyter_text>The proportions of ham and spam are very similar for both the training and testing sets and also similar to the ones of the full dataset.## Cleaning the DataBefore calculating all the probabilities required for the Naive Bayes algorithm, we first need to perform a bit of data cleaning. This will bring the data in a format that will allow us to extract easily all the information we need. Essentially, we want to end up with a dataframe containing for each sms (each row) the counts of the number of times each word from the vocabulary (all unique words listed from all the sms) appear. Let's print the head of the dataframe again and see what cleaning actions to take first.<jupyter_code># Before cleaning training_set.head()<jupyter_output><empty_output><jupyter_text>To be able to later generate the training_set vocabulary, we first need to clean the messages in the `SMS` column.### Letter Case and PunctuationWhat we are going to do first is: * Remove all the punctuation form * Transform every letter in every word to lower case<jupyter_code>training_set["SMS"] = training_set["SMS"].str.replace('\W',' ') training_set["SMS"] = training_set["SMS"].str.lower() # After cleaning training_set.head()<jupyter_output><empty_output><jupyter_text>### Creation of the VocabularyWe can then create a vocabulary for the messages in the training set.<jupyter_code>training_set["SMS"] = training_set["SMS"].str.split() vocabulary = [] for index, row in training_set.iterrows(): for word in row["SMS"]: if word not in vocabulary: vocabulary.append(word) print(vocabulary)<jupyter_output>['yep', 'by', 'the', 'pretty', 'sculpture', 'yes', 'princess', 'are', 'you', 'going', 'to', 'make', 'me', 'moan', 'welp', 'apparently', 'he', 'retired', 'havent', 'i', 'forgot', '2', 'ask', 'ü', 'all', 'smth', 'there', 's', 'a', 'card', 'on', 'da', 'present', 'lei', 'how', 'want', 'write', 'or', 'sign', 'it', 'ok', 'thk', 'got', 'then', 'u', 'wan', 'come', 'now', 'wat', 'kfc', 'its', 'tuesday', 'only', 'buy', 'meals', 'no', 'gravy', 'mark', 'dear', 'was', 'sleeping', 'p', 'pa', 'nothing', 'problem', 'ill', 'be', 'lt', 'gt', 'my', 'uncles', 'in', 'atlanta', 'wish', 'guys', 'great', 'semester', 'phone', 'which', 'your', 'another', 'number', 'greatest', 'test', 'of', 'courage', 'earth', 'is', 'bear', 'defeat', 'without', 'losing', 'heart', 'gn', 'tc', 'dai', 'what', 'this', 'can', 'send', 'resume', 'id', 'am', 'late', 'will', 'at', 'freemsg', 'why', 'haven', 't', 'replied', 'text', 'm', 'randy', 'sexy', 'female', 'and', 'live', 'local', 'luv', 'hear', 'from', 'netcollex', 'ltd', '08700621[...]<jupyter_text>We were able to successfully generate the vocabulary list for our 
training set. This list contains 7783 elements, meaning that messages in the training set are made from a list of 7783 unique words.### The Finale Training SetNext, we use this vocabulary we created to make the data transformation. Eventually, we're going to create a new DataFrame. However, we'll first build a dictionary that we'll then convert to the DataFrame we need. In the block of code below, we: * start by initializing a dictionary named `word_counts_per_sms`, where each key is a unique word (a string) from the vocabulary, and each value is a list of the length of training set, where each element in the list is a 0. * loop over `training_set['SMS']` using at the same time the `enumerate()` function to get both the index and the SMS message (index and sms). * using a nested loop, we loop over `sms` (where `sms` is a list of strings, where each string represents a word in a message). * we increment `word_counts_per_sms[word][index]` by 1. <jupyter_code>word_counts_per_sms = {unique_word: [0] * len(training_set['SMS']) for unique_word in vocabulary} for index, sms in enumerate(training_set['SMS']): for word in sms: word_counts_per_sms[word][index] += 1 word_counts = pd.DataFrame(word_counts_per_sms) word_counts.head()<jupyter_output><empty_output><jupyter_text>The `word_counts` dataframe was successfully created. However, it misses the full messages and, more importantly, the labels. In the following block of code, we add those two columns to our `word_counts` dataframe. We rename the resulting dataframe `training_set_clean`.<jupyter_code>training_set_clean = pd.concat([training_set["Label"], training_set["SMS"], word_counts], axis=1) training_set_clean.head()<jupyter_output><empty_output><jupyter_text>Now that we're done with data cleaning and have a training set to work with, we can begin creating the spam filter.## Building the Spam Filter### Calculation of constantsRecall that our spam filter will be based on a multinomial Naive Bayes algorithm. This algorithm needs to know the probability values of the two following equations to be able to classify new messages: $$P(Spam|w_{1}, w_{2},..., w_{n}) \propto P(Spam) \cdot \prod_{i = 1}^{n} P(w_{i}|Spam)$$ $$P(Ham|w_{1}, w_{2},..., w_{n}) \propto P(Ham) \cdot \prod_{i = 1}^{n} P(w_{i}|Ham)$$ Also, to calculate P(wi|Spam) and P(wi|Ham) inside the formulas above, recall that we need to use these equations: $$P(w_{i}|Spam) = \frac{N_{w_{i}|Spam}+\alpha }{N_{Spam}+\alpha \cdot N_{Vocabulary}}$$ $$P(w_{i}|Ham) = \frac{N_{w_{i}|Ham}+\alpha }{N_{Ham}+\alpha \cdot N_{Vocabulary}}$$ Some of the terms in the four equations above will have the same value for every new message. As a start, let's first calculate: * P(Spam) and P(Ham) * NSpam, NHam, NVocabulary NSpam = number of words in all the spam messages NHam = number of words in all the non-spam messages We'll also use Laplace smoothing and set $\alpha$ = 1. 
In the next blocks of code, and for the training set only, we: * calculate P(Spam) and P(Ham) * calculate NSpam, NHam and NVocabulary * initiate a variable named `alpha` with a value of 1<jupyter_code># Get all spam messages from the training set spams = training_set_clean[training_set_clean["Label"] == 'spam'] # Get all ham messages from the training set hams = training_set_clean[training_set_clean["Label"] == 'ham'] p_spam = len(spams) / len(training_set_clean) p_ham = len(hams) / len(training_set_clean) print(p_spam) print(p_ham) # Total number of unique words in all messages of the training set n_vocabulary = len(vocabulary) # Number of words in all the spam messages n_spam = spams.iloc[:,2:].sum().sum().astype(int) # Number of words in all the ham messages n_ham = hams.iloc[:,2:].sum().sum().astype(int) print(n_vocabulary) print(n_spam) print(n_ham) # Initialization of the variable alpha for Laplace smoothing alpha = 1<jupyter_output><empty_output><jupyter_text>### Calculation of ParametersAll the terms we calculated thus far (`p_spam`, `p_ham`, `n_vocabulary`, `n_spam`, `n_ham`) will have constant values in our equations for every new message (regardless of the message or each individual word in the message). However, P(wi|Spam) and P(wi|Ham) will vary depending on the individual words. For instance, P("secret"|Spam) will have a certain probability value, while P("cousin"|Spam) or P("lovely"|Spam) will most likely have other values. Although both P(wi|Spam) and P(wi|Ham) vary depending on the word, the probability for each individual word is constant for every new message. For instance, let's say we receive two new messages: * "secret code" * "secret party 2night" We'll need to calculate P("secret"|Spam) for both these messages, and we can use the training set to get the values we need to find a result for the equation below: $$P("secret"|Spam) = \frac{N_{"secret"|Spam}+\alpha }{N_{Spam}+\alpha \cdot N_{Vocabulary}}$$ The steps we take to calculate P("secret"|Spam) will be identical for both of our new messages above, or for any other new message that contains the word "secret". The key detail here is that calculating P("secret"|Spam) only depends on the training set, and as long as we don't make changes to the training set, P("secret"|Spam) stays constant. The same reasoning also applies to P("secret"|Ham). This means that we can use our training set to calculate the probability for each word in our vocabulary. If our vocabulary contained only the words "lost", "navigate", and "sea", then we'd need to calculate six probabilities: * P("lost"|Spam) and P("lost"|Ham) * P("navigate"|Spam) and P("navigate"|Ham) * P("sea"|Spam) and P("sea"|Ham) We have 7,783 words in our vocabulary, which means we'll need to calculate a total of 15,566 probabilities. For each word, we need to calculate both P(wi|Spam) and P(wi|Ham). In more technical language, the probability values that P(wi|Spam) and P(wi|Ham) will take are called **parameters**. The fact that we calculate so many values before even beginning the classification of new messages makes the Naive Bayes algorithm very fast (especially compared to other algorithms). When a new message comes in, most of the needed computations are already done, which enables the algorithm to almost instantly classify the new message. If we didn't calculate all these values beforehand, then all these calculations would need to be done every time a new message comes in. 
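To make the smoothing formula concrete before running it on the real counts, here is a tiny worked example with made-up numbers (the counts below are hypothetical and are not computed from this dataset):<jupyter_code># Hypothetical counts, for illustration only (not taken from the SMS data)
n_secret_spam = 10      # times "secret" appears across all spam messages
n_spam_words = 1000     # total number of words in all spam messages
n_vocab = 7783          # vocabulary size
alpha = 1               # Laplace smoothing constant

p_secret_given_spam = (n_secret_spam + alpha) / (n_spam_words + alpha * n_vocab)
print(p_secret_given_spam)  # (10 + 1) / (1000 + 1 * 7783) ≈ 0.00125<jupyter_output><empty_output><jupyter_text>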
Let's now calculate all the parameters using the equations below: $$P(w_{i}|Spam) = \frac{N_{w_{i}|Spam}+\alpha }{N_{Spam}+\alpha \cdot N_{Vocabulary}}$$ $$P(w_{i}|Ham) = \frac{N_{w_{i}|Ham}+\alpha }{N_{Ham}+\alpha \cdot N_{Vocabulary}}$$ We start by initializing two dictionaries, where each key-value pair is a unique word (from our vocabulary) represented as a string, and the value is 0. We'll need one dictionary to store the parameters for P(wi|Spam) and another for the parameters for P(wi|Ham).<jupyter_code>spam_params = {unique_word: 0 for unique_word in vocabulary} ham_params = {unique_word: 0 for unique_word in vocabulary}<jupyter_output><empty_output><jupyter_text>We then iterate over the vocabulary and, for each word, calculate P(wi|Spam) and P(wi|Ham) using the formulas we mentioned above.<jupyter_code>for word in vocabulary: n_word_spam = spams[word].sum() n_word_ham = hams[word].sum() p_word_given_spam = (n_word_spam + alpha) / (n_spam + alpha * n_vocabulary) p_word_given_ham = (n_word_ham + alpha) / (n_ham + alpha * n_vocabulary) spam_params[word] = p_word_given_spam ham_params[word] = p_word_given_ham<jupyter_output><empty_output><jupyter_text>Now that we have calculated all the constants and parameters we need, we can start creating the spam filter.### Creation of the FilterThe spam filter can be understood as a function that: * Takes in as input a new message (w1, w2, ..., wn) * Calculates P(Spam|w1, w2, ..., wn) and P(Ham|w1, w2, ..., wn) * Compares the values of P(Spam|w1, w2, ..., wn) and P(Ham|w1, w2, ..., wn), and: * If P(Ham|w1, w2, ..., wn) > P(Spam|w1, w2, ..., wn), then the message is classified as ham. * If P(Ham|w1, w2, ..., wn) < P(Spam|w1, w2, ..., wn), then the message is classified as spam. * If P(Ham|w1, w2, ..., wn) = P(Spam|w1, w2, ..., wn), then the algorithm may request human help. Some new messages may contain words that are not part of the vocabulary. We will simply ignore these words when we're calculating the probabilities. Now we'll write the code for calculating `p_spam_given_message` and `p_ham_given_message`, and then we'll use the function to classify two new messages. After that, we'll classify all the 1,114 messages in our test set.<jupyter_code>import re def classify(message): # Cleaning of the new message message = re.sub('\W', ' ', message) message = message.lower() message = message.split() # Initialization of the variables p_spam_given_message = p_spam p_ham_given_message = p_ham for word in message: if word in spam_params: p_spam_given_message *= spam_params[word] if word in ham_params: p_ham_given_message *= ham_params[word] else: pass print('P(Spam|message):', p_spam_given_message) print('P(Ham|message):', p_ham_given_message) if p_ham_given_message > p_spam_given_message: print('Label: Ham') elif p_ham_given_message < p_spam_given_message: print('Label: Spam') else: print('Equal probabilities, have a human classify this!')<jupyter_output><empty_output><jupyter_text>We now test our `classify()` function on two new messages, one obviously spam and another one obviously ham. These two messages are the following: * 'WINNER!! This is the secret code to unlock the money: C3421.' * 'Sounds good, Tom, then see u there'<jupyter_code>test_message_1 = 'WINNER!! This is the secret code to unlock the money: C3421.'
test_message_2 = 'Sounds good, Tom, then see u there' print(classify(test_message_1)) print(classify(test_message_2))<jupyter_output>P(Spam|message): 1.3481290211300841e-25 P(Ham|message): 1.9368049028589875e-27 Label: Spam None P(Spam|message): 2.4372375665888117e-25 P(Ham|message): 3.687530435009238e-21 Label: Ham None <jupyter_text>Our spam filter correctly classified the two messages. Let's now move to the proper validation phase!## Assessing the Filter's accuracyWe'll now try to determine how well the spam filter does on our test set of 1,114 messages. The algorithm will output a classification label for every message in our test set, which we'll be able to compare with the actual label. First off, we'll change the `classify()` function that we wrote previously to return the labels instead of printing them. <jupyter_code>def classify_test_set(message): # Cleaning of the new message message = re.sub('\W', ' ', message) message = message.lower() message = message.split() # Initialization of the variables p_spam_given_message = p_spam p_ham_given_message = p_ham for word in message: if word in spam_params: p_spam_given_message *= spam_params[word] if word in ham_params: p_ham_given_message *= ham_params[word] if p_ham_given_message > p_spam_given_message: return 'ham' elif p_ham_given_message < p_spam_given_message: return 'spam' else: return 'needs human classification'<jupyter_output><empty_output><jupyter_text>Now that we have a function that returns labels instead of printing them, we can use it to create a new column in our test set.<jupyter_code>test_set['predicted'] = test_set['SMS'].apply(classify_test_set) test_set.head(10)<jupyter_output><empty_output><jupyter_text>Looking at the head of `test_set`, it seems our filter did pretty well at classifying the messages. We can compare the predicted values with the actual labels for all 1,114 messages to measure how good our spam filter is with classifying new messages. To make the measurement, we'll use **accuracy** as a metric: $$Accuracy = \frac{number\:of\:correctly\:classified\:messages}{total\:number\:of\:classified\:messages}$$<jupyter_code>correct = 0 total = len(test_set) for index, row in test_set.iterrows(): if row["Label"] == row["predicted"]: correct += 1 accuracy = correct / total print(round(accuracy,4))<jupyter_output>0.9874
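<jupyter_text>One optional robustness note (not part of the original project): the posterior values printed above are already on the order of 1e-25 for short messages, so multiplying many per-word probabilities can underflow to 0.0 for very long messages. A sketch of a log-space variant of the classifier, reusing the `p_spam`, `p_ham`, `spam_params` and `ham_params` objects defined earlier, could look like this; because of Laplace smoothing every parameter is strictly positive, so the logarithms are always defined.<jupyter_code>import math
import re


def classify_log(message):
    """Same decision rule as classify_test_set(), but comparing sums of logs."""
    words = re.sub(r'\W', ' ', message).lower().split()

    log_p_spam = math.log(p_spam)
    log_p_ham = math.log(p_ham)

    for word in words:
        # Words outside the vocabulary are ignored, exactly as before
        if word in spam_params:
            log_p_spam += math.log(spam_params[word])
        if word in ham_params:
            log_p_ham += math.log(ham_params[word])

    if log_p_ham > log_p_spam:
        return 'ham'
    elif log_p_ham < log_p_spam:
        return 'spam'
    return 'needs human classification'<jupyter_output><empty_output>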
no_license
/Building a Spam Filter with Naive Bayes/Building a Spam Filter with Naive Bayes.ipynb
Antoine101/DataScience-Portfolio
18
<jupyter_start><jupyter_text>TMDB Box Office Prediction In this dataset, you are provided with 7398 movies and a variety of metadata obtained from The Movie Database (TMDB). Movies are labeled with id. Data points include cast, crew, plot keywords, budget, posters, release dates, languages, production companies, and countries. You are predicting the worldwide revenue for 4398 movies in the test file. Note - many movies are remade over the years, therefore it may seem like multiple instance of a movie may appear in the data, however they are different and should be considered separate movies. In addition, some movies may share a title, but be entirely unrelated.<jupyter_code>import pandas as pd import seaborn as sns import numpy as np from scipy import stats import matplotlib.pyplot as plt train= pd.read_csv("train.csv") test=pd.read_csv("test.csv") print(train.shape) print(test.shape) train.head(2) <jupyter_output><empty_output><jupyter_text>Check Quality of data<jupyter_code>train.columns[train.isnull().any()] train.dtypes train.drop(columns=['homepage','original_title','overview','poster_path','production_countries'],inplace=True) train.drop(columns=['spoken_languages','tagline','Keywords'],inplace=True) train[train['status']=='Rumored'] train.drop([609,1007,1216,1618],inplace=True) train[train['status']=='Rumored'].shape train.drop(columns=['status'],inplace=True) train.head() train.columns[train.isnull().any()] numeric_cols = ['id','budget','popularity','runtime','revenue'] categorical_cols = ['original_language'] object_cols = ['belongs_to_collection','genres','production_companies','title','cast','crew','imdb_id','release_date',]<jupyter_output><empty_output><jupyter_text>*******UNIVARIATE ANALYSIS********NUMERICAL COLUMNS<jupyter_code>train.shape sns.distplot(train["id"]) train['id'].skew() #train['belongs_to_collection'].unique() train[(train['belongs_to_collection'].isnull())].shape train['belongs_to_collection'].fillna(0,inplace=True) train[(train['belongs_to_collection'].isnull()) & (train['budget'] ==0)].shape train.loc[train['id'] == 1865,'revenue'] = 25000000 train.loc[train['id'] == 313,'revenue'] = 12000000 train.loc[train['id'] == 451,'revenue'] = 12000000 train.loc[train['id'] == 16,'revenue'] = 192864 train.loc[train['id'] == 90,'budget'] = 30000000 train.loc[train['id'] == 118,'budget'] = 60000000 train.loc[train['id'] == 149,'budget'] = 18000000 train.loc[train['id'] == 464,'budget'] = 20000000 train.loc[train['id'] == 470,'budget'] = 13000000 train.loc[train['id'] == 513,'budget'] = 930000 train.loc[train['id'] == 797,'budget'] = 8000000 train.loc[train['id'] == 819,'budget'] = 90000000 train.loc[train['id'] == 850,'budget'] = 90000000 train.loc[train['id'] == 1007,'budget'] = 2 train.loc[train['id'] == 1112,'budget'] = 7500000 train.loc[train['id'] == 1131,'budget'] = 4300000 train.loc[train['id'] == 1359,'budget'] = 10000000 train.loc[train['id'] == 1542,'budget'] = 1 train.loc[train['id'] == 1570,'budget'] = 15800000 train.loc[train['id'] == 1571,'budget'] = 4000000 train.loc[train['id'] == 1714,'budget'] = 46000000 train.loc[train['id'] == 1721,'budget'] = 17500000 train.loc[train['id'] == 1885,'budget'] = 12 train.loc[train['id'] == 2091,'budget'] = 10 train.loc[train['id'] == 2268,'budget'] = 17500000 train.loc[train['id'] == 2491,'budget'] = 6 train.loc[train['id'] == 2602,'budget'] = 31000000 train.loc[train['id'] == 2612,'budget'] = 15000000 train.loc[train['id'] == 2696,'budget'] = 10000000 train.loc[train['id'] == 2801,'budget'] = 10000000 
train.loc[train['id'] == 335,'budget'] = 2 train.loc[train['id'] == 348,'budget'] = 12 train.loc[train['id'] == 470,'budget'] = 13000000 train.loc[train['id'] == 513,'budget'] = 1100000 train.loc[train['id'] == 640,'budget'] = 6 train.loc[train['id'] == 696,'budget'] = 1 train.loc[train['id'] == 797,'budget'] = 8000000 train.loc[train['id'] == 850,'budget'] = 1500000 train.loc[train['id'] == 1199,'budget'] = 5 train.loc[train['id'] == 1282,'budget'] = 9 train.loc[train['id'] == 1347,'budget'] = 1 train.loc[train['id'] == 1755,'budget'] = 2 train.loc[train['id'] == 1801,'budget'] = 5 train.loc[train['id'] == 1918,'budget'] = 592 train.loc[train['id'] == 2033,'budget'] = 4 train.loc[train['id'] == 2118,'budget'] = 344 train.loc[train['id'] == 2252,'budget'] = 130 train.loc[train['id'] == 2256,'budget'] = 1 train.loc[train['id'] == 2696,'budget'] = 10000000 #for i, e in enumerate(train['belongs_to_collection'][:2]): #print(i, e) sns.distplot(train["budget"]) train['budget'].skew() train['budget'].unique() train[(train['budget']==0)].shape train[(train['budget']==0)].head() train[(train['budget']<13)].shape train['popularity'].unique() sns.distplot(train["popularity"]) train['popularity'].skew() train['popularity']=train['popularity'].astype(int) train['popularity'].unique() train.isnull().sum() train['runtime'].unique() #train['runtime']=train['runtime'].astype(int) train[(train['runtime'].isnull())] train.drop([2302],inplace=True) train['runtime'].fillna(130,inplace=True) sns.distplot(train["runtime"]) train['runtime'].skew() sns.distplot(train["revenue"]) train['revenue'].skew() train['revenue'].unique() if 'genres' in train: train_genres = train['genres'].str.extractall('([A-Z]\w{0,})') train_genres = train_genres.unstack(level=1,fill_value='Not Found') train_genres.columns = ['genres_1', 'genres_2', 'genres_3', 'genres_4', 'genres_5', 'genres_6', 'genres_7', 'genres_8'] train_genres.head(3) if 'genres' in train: train = train.drop(['genres'], axis=1) if train_genres['genres_1'].isna().any(): train_genres[train_genres['genres_1'].isnull()] if len(train_genres) > 0: train = pd.concat([train, train_genres], axis =1) train.dropna() train.head(2) train[(train['genres_1'].isnull())] train[(train['genres_1']=='Not Found')].shape train.drop([470,1622,1814,1819,2423,2686,2900],inplace=True) train.drop(columns=['genres_2','genres_3','genres_4','genres_5','genres_6','genres_7','genres_8'],inplace=True) train.drop(columns=['production_companies','belongs_to_collection','cast','crew','title','imdb_id'],inplace=True) train.head(3) train[(train['budget'] == 0) & (train['revenue'] == 0)].shape train[(train['budget'] == 0) & (train['revenue'] >= 1)].shape train[(train['budget']==0) & (train['popularity'] <= 8.248895)].shape train[(train['budget']==0) & (train['original_language'] =='en')].shape train['release_date'] = pd.to_datetime(train['release_date']) train['Year'] = train['release_date'].dt.year train['Month'] = train['release_date'].dt.month train['Day'] = train['release_date'].dt.day train['dayOfWeek'] = train['release_date'].dt.day_name() train.head() train[(train['budget'] == 0) & (train['revenue'] <100000)].shape train['revenue'].median() train[(train['budget'] == 0) & (train['revenue'] >= 100000000) ].shape x=train[(train['budget'] > 0) & (train['revenue'] >= 100000000)] x['budget'].mean() train.loc[(train['budget'] == 0) & (train['revenue'] >= 100000000),'budget']=69704145.09803921 train[(train['budget'] == 0) & (train['revenue'] >= 10000000) ].shape y=train[(train['budget'] >0) & 
(train['revenue'] >= 10000000) ] y['budget'].mean() train.loc[(train['budget'] == 0) & (train['revenue'] >= 10000000), 'budget']=40652976.93165121 train[(train['budget'] ==0) & (train['revenue'] <10000000)].shape z=train[(train['budget'] >0) & (train['revenue'] <10000000)] z['budget'].mean() train.loc[(train['budget'] == 0) & (train['revenue'] >= 10000000), 'budget']=40652976.93165121 train.drop(columns=['Year','Month','Day','release_date'],inplace=True) train['original_language'].unique() train[(train['original_language'] =='en')].shape train.original_language.value_counts() lang={'en':1, 'hi':5, 'ko':8, 'sr':13, 'fr':2, 'it':7, 'nl':13, 'zh':10, 'es':4, 'cs':13, 'ta':12, 'cn':9, 'ru':3, 'tr':13, 'ja':6, 'fa':13, 'sv':13, 'de':11, 'te':13, 'pt':13, 'mr':13, 'da':13, 'fi':13, 'el':13, 'ur':13, 'he':13, 'no':13, 'ar':13, 'nb':13, 'ro':13, 'vi':13, 'pl':13, 'hu':13, 'ml':13, 'bn':13, 'id':13 } mappedoriginal_language = train['original_language'].map(lang) train['original_language'] = mappedoriginal_language train.head() train.loc[(train['dayOfWeek'] == 'Monday') ,'dayOfWeek'] =0 train.loc[(train['dayOfWeek'] == 'Tuesday') ,'dayOfWeek'] =0 train.loc[(train['dayOfWeek'] == 'Wednesday') ,'dayOfWeek'] =0 train.loc[(train['dayOfWeek'] == 'Thursday') ,'dayOfWeek'] =0 train.loc[(train['dayOfWeek'] == 'Friday') ,'dayOfWeek'] =1 train.loc[(train['dayOfWeek'] == 'Saturday') ,'dayOfWeek'] =1 train.loc[(train['dayOfWeek'] == 'Sunday') ,'dayOfWeek'] =1 train.head() Dict ={ 'Comedy':1, 'Drama': 2, 'Thriller':3, 'Action':4, 'Animation':5, 'Horror':6, 'Documentary':7, 'Adventure':8, 'Crime':9, 'Mystery':10, 'Fantasy':11, 'War':12, 'Science':13, 'Romance':14, 'Music':15, 'Western':16, 'Family':17, 'History':18, 'Foreign':19, 'TV':20 } genres = train['genres_1'].map(Dict) train['genres'] = genres train.head() train.drop(columns=['genres_1'],inplace=True) train.head() from sklearn.preprocessing import power_transform train['budget'] = power_transform(train[['budget']], method='yeo-johnson') train['budget'].skew() train['budget'] = power_transform(train[['budget']], method='yeo-johnson') from sklearn.preprocessing import power_transform train['revenue'] = power_transform(train[['revenue']], method='yeo-johnson') train['revenue'].skew() train['revenue'] = power_transform(train[['revenue']], method='yeo-johnson') targets = train['revenue'] train.drop(columns=['revenue'], inplace=True) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(train, targets, test_size=0.3, random_state=9) print(X_train.shape) print(X_test.shape) from sklearn.ensemble import RandomForestRegressor regressor = RandomForestRegressor(n_estimators=10, random_state=0) regressor.fit(X_train, y_train) from sklearn.metrics import mean_squared_error train_pred = regressor.predict(X_train) mean_squared_error(y_train, train_pred) from sklearn.metrics import mean_squared_error y_pred = regressor.predict(X_test) mean_squared_error(y_test, y_pred) importances =regressor.feature_importances_ #Visalize feature importances # Sort feature importances in descending order indices = np.argsort(importances)[::-1] # Rearrange feature names so they match the sorted feature importances names = [X_train.columns[i] for i in indices] # Create plot plt.figure() # Create plot title plt.title("Feature Importance") # Add bars plt.bar(range(X_train.shape[1]), importances[indices]) # Add feature names as x-axis labels plt.xticks(range(X_train.shape[1]), names, rotation=90) # Show plot 
plt.show()<jupyter_output><empty_output>
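<jupyter_text>The notebook stops at the feature-importance plot, but the competition also asks for revenue predictions on test.csv. A minimal sketch of that last step is shown below; it assumes `test` has been passed through the same cleaning, date and encoding steps as `train`, and it keeps the fitted `PowerTransformer` object so the Yeo-Johnson transform of `revenue` can be inverted before writing the submission. The names `train_features`, `train_targets`, `test_features` and `test_ids` are placeholders, not objects created earlier in this notebook.<jupyter_code>import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import PowerTransformer

# Keep the fitted transformer (rather than the one-shot power_transform helper)
# so predictions can be mapped back to the original revenue scale.
pt = PowerTransformer(method='yeo-johnson')
y_transformed = pt.fit_transform(train_targets.values.reshape(-1, 1)).ravel()

model = RandomForestRegressor(n_estimators=10, random_state=0)
model.fit(train_features, y_transformed)

# test_features must have gone through the same preprocessing as the training data
preds_transformed = model.predict(test_features)
preds = pt.inverse_transform(preds_transformed.reshape(-1, 1)).ravel()

submission = pd.DataFrame({'id': test_ids, 'revenue': preds})
submission.to_csv('submission.csv', index=False)<jupyter_output><empty_output>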
no_license
/Movie Review.ipynb
sandy199351/santhosh
3
<jupyter_start><jupyter_text>## **Money Flow Index** 1. MFI generates overbought or oversold signals using both prices and volume data. 2. An MFI reading > 80 is considered overbought and an MFI reading < 20 is considered oversold. 3. MFI is most beneficial when there is a divergence. A divergence is when the oscillator is moving in the opposite direction of price. This is a signal of a potential reversal in the prevailing price trend. 4. Ex- a very high MFI, which begins to fall below 80, even when the closing price is increasing is a price reversal signal to the downside and vice-versa. 5. In an uptrend, a drop < 20-30 followed by a rally back above it could indicate a pullback is over and the price uptrend is resuming. The same goes for a downtrend. A short-term rally could push the MFI up to 70 or 80, but when it drops back below that could be the time to enter a short trade in preparation for another drop. Details: https://www.investopedia.com/terms/m/mfi.asp <jupyter_code>#Importing Libraries import numpy as np import pandas as pd from pandas_datareader import data import matplotlib.pyplot as plt from datetime import datetime from datetime import timedelta import warnings warnings.filterwarnings('ignore') #Installing yfinance library for fetching historical stock data !pip install yfinance import yfinance as yf #Function to get the stock data def stock_data(stock, start_date, end_date): tickerSymbol = stock #get data on this ticker tickerData = yf.Ticker(tickerSymbol) #get the historical prices for this ticker tickerDf = tickerData.history(period='1d', start=start_date, end=end_date) return tickerDf #Fetching a 1Y dataset for better representation df = stock_data("ITC.BO", "2020-06-08", "2021-06-08") df.reset_index(level="Date", inplace=True) df.drop(columns = ['Dividends', 'Stock Splits'], inplace=True) print(df.shape) df.head() #Creating a dataset with only Closing Price column and Date as index data = df.sort_index(ascending=True,axis=0) df1 = pd.DataFrame(index=range(0,len(df)),columns=['Date','Close']) for i in range(0,len(data)): df1["Date"][i]=data['Date'][i] df1["Close"][i]=data["Close"][i] df1.index=df1.Date df1.drop("Date",axis=1,inplace=True) print(df1.shape) df1.head() #Defining Function def MFI(data, period): #Calculating the typical price typical_price = (data['Close'] + data['High'] + data['Low']) / 3 #Calculate the money flow money_flow = typical_price * data['Volume'] #Get all of the positive and negative money flows positive_flow =[] negative_flow = [] for i in range(1, len(typical_price)): if typical_price[i] > typical_price[i-1]: #if the present typical price is greater than yesterdays typical price positive_flow.append(money_flow[i-1])# Then append money flow at position i-1 to the positive flow list negative_flow.append(0) elif typical_price[i] < typical_price[i-1]:#if the present typical price is less than yesterdays typical price negative_flow.append(money_flow[i-1])# Then append money flow at position i-1 to negative flow list positive_flow.append(0) else: positive_flow.append(0) negative_flow.append(0) #Get all of the positive and negative money flows within the time period positive_mf =[] negative_mf = [] for i in range(period-1, len(positive_flow)): positive_mf.append(sum(positive_flow[i+1-period : i+1])) for i in range(period-1, len(negative_flow)): negative_mf.append(sum(negative_flow[i+1-period : i+1])) #Calculating MFI MFI = 100 * (np.array(positive_mf) / (np.array(positive_mf) + np.array(negative_mf) )) #Creating a new DataFrame as MFI and df was 
different indices by delta new_df = pd.DataFrame() new_df = data[period:] new_df['MFI'] = MFI return MFI, new_df #Implementing Function Money_Flow_Index, new_df = MFI(df,14) new_df.head() #Plotting The Money Flow Index df2 = pd.DataFrame() df2['MFI'] = Money_Flow_Index plt.figure(figsize=(12.2,4.5)) plt.plot( df2['MFI'], label='MFI') plt.axhline(10, linestyle='--', color = 'orange') #Over Sold line (Buy) plt.axhline(20, linestyle='--',color = 'blue') #Over Sold Line (Buy) plt.axhline(80, linestyle='--', color = 'blue') #Over Bought line (Sell) plt.axhline(90, linestyle='--', color = 'orange') #Over Bought line (Sell) plt.title('MFI') plt.ylabel('MFI Values',fontsize=18) plt.legend(df2.columns.values, loc='upper left') plt.show()<jupyter_output><empty_output>
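<jupyter_text>As a cross-check of the loop-based implementation above, the same indicator can be written with pandas rolling sums. This sketch follows the common textbook MFI definition (each day's typical price is compared with the previous day's, and that day's own money flow is accumulated), so its values may differ slightly from the function above, which accumulates the flow at position i-1.<jupyter_code>def mfi_vectorized(price_df, period=14):
    """Money Flow Index via rolling sums; price_df needs High, Low, Close, Volume."""
    typical_price = (price_df['High'] + price_df['Low'] + price_df['Close']) / 3
    money_flow = typical_price * price_df['Volume']

    direction = typical_price.diff()
    positive_flow = money_flow.where(direction > 0, 0.0)
    negative_flow = money_flow.where(direction < 0, 0.0)

    positive_mf = positive_flow.rolling(period).sum()
    negative_mf = negative_flow.rolling(period).sum()

    return 100 * positive_mf / (positive_mf + negative_mf)


# Example usage on the DataFrame loaded above (the first `period` values are NaN)
mfi_alt = mfi_vectorized(df, period=14)
print(mfi_alt.tail())<jupyter_output><empty_output>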
no_license
/Money_Flow_Index_(MFI).ipynb
makulkarni-mba/Stock-Prices-Analytics-Dashboard
1
<jupyter_start><jupyter_text> # Spatiotemporal permutation F-test on full sensor data Tests for differential evoked responses in at least one condition using a permutation clustering test. The FieldTrip neighbor templates will be used to determine the adjacency between sensors. This serves as a spatial prior to the clustering. Spatiotemporal clusters will then be visualized using custom matplotlib code. Here, the unit of observation is epochs from a specific study subject. However, the same logic applies when the unit observation is a number of study subject each of whom contribute their own averaged data (i.e., an average of their epochs). This would then be considered an analysis at the "2nd level". See the [FieldTrip tutorial](ft_cluster_) for a caveat regarding the possible interpretation of "significant" clusters. For more information on cluster-based permutation testing in MNE-Python, see also: `tut-cluster-one-samp-tfr` <jupyter_code># Authors: Denis Engemann <[email protected]> # Jona Sassenhagen <[email protected]> # Alex Rockhill <[email protected]> # Stefan Appelhoff <[email protected]> # # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import scipy.stats import mne from mne.stats import spatio_temporal_cluster_test, combine_adjacency from mne.datasets import sample from mne.channels import find_ch_adjacency from mne.viz import plot_compare_evokeds from mne.time_frequency import tfr_morlet<jupyter_output><empty_output><jupyter_text>## Set parameters <jupyter_code>data_path = sample.data_path() meg_path = data_path / 'MEG' / 'sample' raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4} tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 30) events = mne.read_events(event_fname)<jupyter_output><empty_output><jupyter_text>## Read epochs for the channel of interest <jupyter_code>picks = mne.pick_types(raw.info, meg='mag', eog=True) reject = dict(mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, reject=reject, preload=True) epochs.drop_channels(['EOG 061']) epochs.equalize_event_counts(event_id) # Obtain the data as a 3D matrix and transpose it such that # the dimensions are as expected for the cluster permutation test: # n_epochs × n_times × n_channels X = [epochs[event_name].get_data() for event_name in event_id] X = [np.transpose(x, (0, 2, 1)) for x in X]<jupyter_output><empty_output><jupyter_text>## Find the FieldTrip neighbor definition to setup sensor adjacency <jupyter_code>adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag') print(type(adjacency)) # it's a sparse matrix! fig, ax = plt.subplots(figsize=(5, 4)) ax.imshow(adjacency.toarray(), cmap='gray', origin='lower', interpolation='nearest') ax.set_xlabel('{} Magnetometers'.format(len(ch_names))) ax.set_ylabel('{} Magnetometers'.format(len(ch_names))) ax.set_title('Between-sensor adjacency') fig.tight_layout()<jupyter_output><empty_output><jupyter_text>## Compute permutation statistic How does it work? We use clustering to "bind" together features which are similar. Our features are the magnetic fields measured over our sensor array at different times. This reduces the multiple comparison problem. To compute the actual test-statistic, we first sum all F-values in all clusters. 
We end up with one statistic for each cluster. Then we generate a distribution from the data by shuffling our conditions between our samples and recomputing our clusters and the test statistics. We test for the significance of a given cluster by computing the probability of observing a cluster of that size :footcite:`MarisOostenveld2007,Sassenhagen2019`. <jupyter_code># We are running an F test, so we look at the upper tail # see also: https://stats.stackexchange.com/a/73993 tail = 1 # We want to set a critical test statistic (here: F), to determine when # clusters are being formed. Using Scipy's percent point function of the F # distribution, we can conveniently select a threshold that corresponds to # some alpha level that we arbitrarily pick. alpha_cluster_forming = 0.001 # For an F test we need the degrees of freedom for the numerator # (number of conditions - 1) and the denominator (number of observations # - number of conditions): n_conditions = len(event_id) n_observations = len(X[0]) dfn = n_conditions - 1 dfd = n_observations - n_conditions # Note: we calculate 1 - alpha_cluster_forming to get the critical value # on the right tail f_thresh = scipy.stats.f.ppf(1 - alpha_cluster_forming, dfn=dfn, dfd=dfd) # run the cluster based permutation analysis cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000, threshold=f_thresh, tail=tail, n_jobs=1, buffer_size=None, adjacency=adjacency) F_obs, clusters, p_values, _ = cluster_stats<jupyter_output><empty_output><jupyter_text>NoteNote how we only specified an adjacency for sensors! However, because we used :func:`mne.stats.spatio_temporal_cluster_test`, an adjacency for time points was automatically taken into account. That is, at time point N, the time points N - 1 and N + 1 were considered as adjacent (this is also called "lattice adjacency"). This is only possbile because we ran the analysis on 2D data (times × channels) per observation ... for 3D data per observation (e.g., times × frequencies × channels), we will need to use :func:`mne.stats.combine_adjacency`, as shown further below. Note also that the same functions work with source estimates. The only differences are the origin of the data, the size, and the adjacency definition. It can be used for single trials or for groups of subjects. ## Visualize clusters <jupyter_code># We subselect clusters that we consider significant at an arbitrarily # picked alpha level: "p_accept". # NOTE: remember the caveats with respect to "significant" clusters that # we mentioned in the introduction of this tutorial! 
p_accept = 0.01 good_cluster_inds = np.where(p_values < p_accept)[0] # configure variables for visualization colors = {"Aud": "crimson", "Vis": 'steelblue'} linestyles = {"L": '-', "R": '--'} # organize data for plotting evokeds = {cond: epochs[cond].average() for cond in event_id} # loop over clusters for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices time_inds, space_inds = np.squeeze(clusters[clu_idx]) ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) # get topography for F stat f_map = F_obs[time_inds, ...].mean(axis=0) # get signals at the sensors contributing to the cluster sig_times = epochs.times[time_inds] # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max, show=False, colorbar=False, mask_params=dict(markersize=10)) image = ax_topo.images[0] # create additional axes (for ERF and colorbar) divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel( 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]])) # add new axis for time courses and plot time courses ax_signals = divider.append_axes('right', size='300%', pad=1.2) title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds)) if len(ch_inds) > 1: title += "s (mean)" plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals, colors=colors, linestyles=linestyles, show=False, split_legend=True, truncate_yaxis='auto') # plot temporal cluster extent ymin, ymax = ax_signals.get_ylim() ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1], color='orange', alpha=0.3) # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show()<jupyter_output><empty_output><jupyter_text>## Permutation statistic for time-frequencies Let's do the same thing with the time-frequency decomposition of the data (see `tut-sensors-time-freq` for a tutorial and `ex-tfr-comparison` for a comparison of time-frequency methods) to show how cluster permutations can be done on higher-dimensional data. <jupyter_code>decim = 4 freqs = np.arange(7, 30, 3) # define frequencies of interest n_cycles = freqs / freqs[0] epochs_power = list() for condition in [epochs[k] for k in ('Aud/L', 'Vis/L')]: this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles, decim=decim, average=False, return_itc=False) this_tfr.apply_baseline(mode='ratio', baseline=(None, 0)) epochs_power.append(this_tfr.data) # transpose again to (epochs, frequencies, times, channels) X = [np.transpose(x, (0, 2, 3, 1)) for x in epochs_power]<jupyter_output><empty_output><jupyter_text>Remember the note on the adjacency matrix from above: For 3D data, as here, we must use :func:`mne.stats.combine_adjacency` to extend the sensor-based adjacency to incorporate the time-frequency plane as well. Here, the integer inputs are converted into a lattice and combined with the sensor adjacency matrix so that data at similar times and with similar frequencies and at close sensor locations are clustered together. 
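As a quick illustrative aside (not part of the original tutorial): combining two small lattice dimensions with the sensor adjacency should give a square sparse matrix whose size is the product of all three dimensions, which is an easy way to check that the lattice was built the way you expect.<jupyter_code># Illustrative only: 2 frequencies × 3 time points × the magnetometer adjacency
toy_adjacency = combine_adjacency(2, 3, adjacency)
print(toy_adjacency.shape)  # expected: (2 * 3 * len(ch_names), 2 * 3 * len(ch_names))<jupyter_output><empty_output>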
<jupyter_code># our data at each observation is of shape frequencies × times × channels tfr_adjacency = combine_adjacency( len(freqs), len(this_tfr.times), adjacency)<jupyter_output><empty_output><jupyter_text>Now we can run the cluster permutation test, but first we have to set a threshold. This example decimates in time and uses few frequencies so we need to increase the threshold from the default value in order to have differentiated clusters (i.e., so that our algorithm doesn't just find one large cluster). For a more principled method of setting this parameter, threshold-free cluster enhancement may be used. See `disc-stats` for a discussion. <jupyter_code># This time we don't calculate a threshold based on the F distribution. # We might as well select an arbitrary threshold for cluster forming tfr_threshold = 15.0 # run cluster based permutation analysis cluster_stats = spatio_temporal_cluster_test( X, n_permutations=1000, threshold=tfr_threshold, tail=1, n_jobs=1, buffer_size=None, adjacency=tfr_adjacency)<jupyter_output><empty_output><jupyter_text>Finally, we can plot our results. It is difficult to visualize clusters in time-frequency-sensor space; plotting time-frequency spectrograms and plotting topomaps display time-frequency and sensor space respectively but they are difficult to combine. We will plot topomaps with the clustered sensors colored in white adjacent to spectrograms in order to provide a visualization of the results. This is a dimensionally limited view, however. Each sensor has its own significant time-frequencies, but, in order to display a single spectrogram, all the time-frequencies that are significant for any sensor in the cluster are plotted as significant. This is a difficulty inherent to visualizing high-dimensional data and should be taken into consideration when interpreting results. 
<jupyter_code>F_obs, clusters, p_values, _ = cluster_stats good_cluster_inds = np.where(p_values < p_accept)[0] for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices freq_inds, time_inds, space_inds = clusters[clu_idx] ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) freq_inds = np.unique(freq_inds) # get topography for F stat f_map = F_obs[freq_inds].mean(axis=0) f_map = f_map[time_inds].mean(axis=0) # get signals at the sensors contributing to the cluster sig_times = epochs.times[time_inds] # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max, show=False, colorbar=False, mask_params=dict(markersize=10)) image = ax_topo.images[0] # create additional axes (for ERF and colorbar) divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel( 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]])) # remove the title that would otherwise say "0.000 s" ax_topo.set_title("") # add new axis for spectrogram ax_spec = divider.append_axes('right', size='300%', pad=1.2) title = 'Cluster #{0}, {1} spectrogram'.format(i_clu + 1, len(ch_inds)) if len(ch_inds) > 1: title += " (max over channels)" F_obs_plot = F_obs[..., ch_inds].max(axis=-1) F_obs_plot_sig = np.zeros(F_obs_plot.shape) * np.nan F_obs_plot_sig[tuple(np.meshgrid(freq_inds, time_inds))] = \ F_obs_plot[tuple(np.meshgrid(freq_inds, time_inds))] for f_image, cmap in zip([F_obs_plot, F_obs_plot_sig], ['gray', 'autumn']): c = ax_spec.imshow(f_image, cmap=cmap, aspect='auto', origin='lower', extent=[epochs.times[0], epochs.times[-1], freqs[0], freqs[-1]]) ax_spec.set_xlabel('Time (ms)') ax_spec.set_ylabel('Frequency (Hz)') ax_spec.set_title(title) # add another colorbar ax_colorbar2 = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(c, cax=ax_colorbar2) ax_colorbar2.set_ylabel('F-stat') # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show()<jupyter_output><empty_output>
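<jupyter_text>As mentioned above, threshold-free cluster enhancement (TFCE) is a more principled alternative to hand-picking `tfr_threshold`. A sketch of how that option is typically passed to the same function is shown below; the `start`/`step` values are illustrative only, and TFCE is considerably slower because it effectively evaluates many thresholds.<jupyter_code># TFCE: pass a dict instead of a fixed cluster-forming threshold
tfce = dict(start=0, step=0.2)

cluster_stats_tfce = spatio_temporal_cluster_test(
    X, n_permutations=1000, threshold=tfce, tail=1,
    n_jobs=1, buffer_size=None, adjacency=tfr_adjacency)<jupyter_output><empty_output>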
permissive
/1.0/_downloads/7ca3f34c286b629113cbb522edf26a21/75_cluster_ftest_spatiotemporal.ipynb
mne-tools/mne-tools.github.io
10
<jupyter_start><jupyter_text># US visa application analysis## Section 1 -- Data cleaning<jupyter_code># read the csv file using pandas import pandas as pd import numpy as np data = pd.read_csv("us_perm_visas.csv") # original data size # rows: 374362 # columns: 154 data.shape data<jupyter_output><empty_output><jupyter_text>We recognized that our dataset is extremely sparse and has a lot of NaN cells. In order to get a more meaningful analysis, we need to clean the data by erasing some of the columns first.<jupyter_code># drop the columns with non-null density lower than the threshold (~0.639) # data_clean = data.dropna(axis=1, thresh=data.shape[0]*0.638) data_clean = data.dropna(axis=1, thresh=data.shape[0]*0.639) print(data_clean.shape) # drop the rows with any NaN value data_clean = data_clean.dropna(axis=0, how="any") # data_clean = data_clean.dropna(axis=0, thresh=data.shape[1]*0.3) # data_clean = data_clean.dropna(axis=0, thresh=data.shape[1]*0.4) print(data_clean.shape) pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) data_clean<jupyter_output><empty_output>
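<jupyter_text>A small optional diagnostic (not in the original notebook) that helps justify where to put the column threshold: look at the fraction of missing values per column before choosing the cut-off.<jupyter_code># Fraction of missing values per column, most complete columns first
null_fraction = data.isnull().mean().sort_values()
print(null_fraction.head(10))   # densest columns
print(null_fraction.tail(10))   # sparsest columns

# How many columns survive a given completeness requirement
for keep_ratio in (0.5, 0.639, 0.8):
    kept = ((1 - null_fraction) >= keep_ratio).sum()
    print(f"columns with at least {keep_ratio:.1%} non-null values: {kept}")<jupyter_output><empty_output>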
no_license
/US_visa_application_analysis.ipynb
fall4knight/US-visa-application
2
<jupyter_start><jupyter_text># Вебинар 6. Консультация по курсовому проекту. ### Задание для курсового проекта Метрика: R2 - коэффициент детерминации (sklearn.metrics.r2_score) Сдача проекта: 1. Прислать в раздел Задания Урока 10 ("Вебинар. Консультация по итоговому проекту") ссылку на программу в github (программа должна содержаться в файле Jupyter Notebook с расширением ipynb). (Pull request не нужен, только ссылка ведущая на сам скрипт). 2. Приложить файл с названием по образцу NVBaranov_predictions.csv с предсказанными ценами для квартир из test.csv (файл должен содержать два поля: Id, Price). В файле с предсказаниями должна быть 5001 строка (названия колонок + 5000 предсказаний). Сроки и условия сдачи: Дедлайн: сдать проект нужно в течение 72 часов после начала Урока 10 ("Вебинар. Консультация по итоговому проекту"). Для успешной сдачи должны быть все предсказания (для 5000 квартир) и R2 должен быть больше 0.6. При сдаче до дедлайна результат проекта может попасть в топ лучших результатов. Повторная сдача и проверка результатов возможны только при условии предыдущей неуспешной сдачи. Успешный проект нельзя пересдать в целях повышения результата. Проекты, сданные после дедлайна или сданные повторно, не попадают в топ лучших результатов, но можно узнать результат. В качестве итогового результата берется первый успешный результат, последующие успешные результаты не учитываются. Примечание: Все файлы csv должны содержать названия полей (header - то есть "шапку"), разделитель - запятая. В файлах не должны содержаться индексы из датафрейма. Рекомендации для файла с кодом (ipynb): 1. Файл должен содержать заголовки и комментарии 2. Повторяющиеся операции лучше оформлять в виде функций 3. Не делать вывод большого количества строк таблиц (5-10 достаточно) 4. По возможности добавлять графики, описывающие данные (около 3-5) 5. Добавлять только лучшую модель, то есть не включать в код все варианты решения проекта 6. Скрипт проекта должен отрабатывать от начала и до конца (от загрузки данных до выгрузки предсказаний) 7. Весь проект должен быть в одном скрипте (файл ipynb). 8. При использовании статистик (среднее, медиана и т.д.) в качестве признаков, лучше считать их на трейне, и потом на валидационных и тестовых данных не считать статистики заново, а брать их с трейна. Если хватает знаний, можно использовать кросс-валидацию, но для сдачи этого проекта достаточно разбить данные из train.csv на train и valid. 9. Проект должен полностью отрабатывать за разумное время (не больше 10 минут), поэтому в финальный вариант лучше не включать GridSearch с перебором большого количества сочетаний параметров. 10. Допускается применение любых моделей машинного обучения из библиотеки sklearn.### Прогнозирование на тестовом датасете 1. Выполнить для тестового датасета те же этапы обработки и постронияния признаков (лучше выполнять действия сразу для двух датасетов) 2. Не потерять и не перемешать индексы от примеров при построении прогнозов 3. 
Прогнозы должны быть для все примеров из тестового датасета (для всех строк)**Подключение библиотек и скриптов**<jupyter_code>import numpy as np import pandas as pd import random from sklearn.model_selection import train_test_split, cross_val_score from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score as r2 from sklearn.model_selection import KFold, GridSearchCV import matplotlib import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import warnings warnings.filterwarnings('ignore') matplotlib.rcParams.update({'font.size': 14}) pd.set_option('precision', 3) pd.set_option('max_columns', 100) def evaluate_preds(train_true_values, train_pred_values, val_true_values, val_pred_values): """ Функция для оценки работы модели Parameters: train_true_values - целевая переменная из тренировочной части датасета train_pred_values - предсказания модели по тренировочной части val_true_values - целевая переменная из валидационной части датасета val_pred_values - предсказания модели по валидационной части Returns: R2 на тренировочной и валидационной части, графики зависимости истинных значений от предсказаний """ print("Train R2:\t" + str(round(r2(train_true_values, train_pred_values), 3))) print("Valid R2:\t" + str(round(r2(val_true_values, val_pred_values), 3))) plt.figure(figsize=(18,10)) plt.subplot(121) sns.scatterplot(x=train_pred_values, y=train_true_values) plt.xlabel('Predicted values') plt.ylabel('True values') plt.title('Train sample prediction') plt.subplot(122) sns.scatterplot(x=val_pred_values, y=val_true_values) plt.xlabel('Predicted values') plt.ylabel('True values') plt.title('Test sample prediction') plt.show()<jupyter_output><empty_output><jupyter_text>**Пути к директориям и файлам**<jupyter_code>TRAIN_DATASET_PATH = 'datasets/project_task/train.csv' TEST_DATASET_PATH = 'datasets/project_task/test.csv'<jupyter_output><empty_output><jupyter_text>### Загрузка данных**Описание датасета** * **Id** - идентификационный номер квартиры * **DistrictId** - идентификационный номер района * **Rooms** - количество комнат * **Square** - площадь * **LifeSquare** - жилая площадь * **KitchenSquare** - площадь кухни * **Floor** - этаж * **HouseFloor** - количество этажей в доме * **HouseYear** - год постройки дома * **Ecology_1, Ecology_2, Ecology_3** - экологические показатели местности * **Social_1, Social_2, Social_3** - социальные показатели местности * **Healthcare_1, Helthcare_2** - показатели местности, связанные с охраной здоровья * **Shops_1, Shops_2** - показатели, связанные с наличием магазинов, торговых центров * **Price** - цена квартиры<jupyter_code>train_df = pd.read_csv('train.csv') display(train_df.tail()) print(train_df.shape) # train_df = train_df.set_index('Id') # train_df.head() test_df = pd.read_csv('test.csv') display(test_df.tail()) print(test_df.shape) # test_df = test_df.set_index('Id') train_df.shape[1]-1 == test_df.shape[1]<jupyter_output><empty_output><jupyter_text>### Приведение типов<jupyter_code>train_df.dtypes train_df['Id'] = train_df['Id'].astype(str) train_df['DistrictId'] = train_df['DistrictId'].astype(str)<jupyter_output><empty_output><jupyter_text>### Обзор данных**Целевая переменная**<jupyter_code>plt.figure(figsize = (16, 8)) train_df['Price'].hist(bins=30) plt.ylabel('Count') plt.xlabel('Price') plt.title('Target distribution') plt.show()<jupyter_output><empty_output><jupyter_text>**Количественные 
переменные**<jupyter_code>train_df.describe().T<jupyter_output><empty_output><jupyter_text>**Категориальные переменные**<jupyter_code>cat_colnames = train_df.select_dtypes(include='object').columns.tolist() cat_colnames for cat_colname in cat_colnames[2:]: print(str(cat_colname) + '\n\n' + str(train_df[cat_colname].value_counts()) + '\n' + '*' * 100 + '\n')<jupyter_output>Ecology_2 B 9903 A 97 Name: Ecology_2, dtype: int64 **************************************************************************************************** Ecology_3 B 9725 A 275 Name: Ecology_3, dtype: int64 **************************************************************************************************** Shops_2 B 9175 A 825 Name: Shops_2, dtype: int64 **************************************************************************************************** <jupyter_text>### Обработка выбросов**Rooms**<jupyter_code>train_df['Rooms'].value_counts() train_df.loc[train_df['Rooms'].isin([0, 10, 19]), 'Rooms'] = train_df['Rooms'].median()<jupyter_output><empty_output><jupyter_text>**Square, LifeSquare, KitchenSquare**<jupyter_code>train_df.describe() steps = [] scores = [] # <- записываем финальный score # steps.append('обработка пропусков, выбросов var1') train_df = train_df[train_df['Square'].isnull() | (train_df['Square'] < train_df['Square'].quantile(.99)) & (train_df['Square'] > train_df['Square'].quantile(.01))] train_df = train_df[train_df['LifeSquare'].isnull() | (train_df['LifeSquare'] < train_df['LifeSquare'].quantile(.99)) & (train_df['LifeSquare'] > train_df['LifeSquare'].quantile(.01))] train_df = train_df[train_df['KitchenSquare'].isnull() | (train_df['KitchenSquare'] < train_df['KitchenSquare'].quantile(.99)) & (train_df['KitchenSquare'] > train_df['KitchenSquare'].quantile(.01))] steps.append('обработка пропусков, выбросов var2') """ ... ... ... 
""" train_df.describe() train_df.loc[train_df['LifeSquare'] < 10, 'LifeSquare'] = 10 train_df.loc[train_df['KitchenSquare'] < 3, 'KitchenSquare'] = 3<jupyter_output><empty_output><jupyter_text>**HouseFloor, Floor**<jupyter_code>train_df['HouseFloor'].sort_values().unique() train_df['Floor'].sort_values().unique() train_df.loc[train_df['HouseFloor'] == 0, 'HouseFloor'] = train_df['HouseFloor'].median() floor_outliers = train_df[train_df['Floor'] > train_df['HouseFloor']].index train_df.loc[floor_outliers, 'Floor'] = train_df.loc[floor_outliers, 'HouseFloor'].apply(lambda x: random.randint(1, x)) <jupyter_output><empty_output><jupyter_text>**HouseYear**<jupyter_code>train_df['HouseYear'].sort_values().unique() train_df.loc[train_df['HouseYear'] > 2020, 'HouseYear'] = 2020<jupyter_output><empty_output><jupyter_text>### Обработка пропусков<jupyter_code>train_df.isnull().sum() train_df[['Square', 'LifeSquare', 'KitchenSquare']].head(10)<jupyter_output><empty_output><jupyter_text>**LifeSquare**<jupyter_code># медиана до корректировки train_df['LifeSquare'].median() # медиана расхождения площадей square_med_diff = (train_df.loc[train_df['LifeSquare'].notnull(), 'Square'] - train_df.loc[train_df['LifeSquare'].notnull(), 'LifeSquare'] - train_df.loc[train_df['LifeSquare'].notnull(), 'KitchenSquare']).median() square_med_diff train_df.loc[train_df['LifeSquare'].isnull(), 'LifeSquare'] = ( train_df.loc[train_df['LifeSquare'].isnull(), 'Square'] - train_df.loc[train_df['LifeSquare'].isnull(), 'KitchenSquare'] - square_med_diff ) train_df['LifeSquare'].median()<jupyter_output><empty_output><jupyter_text>**Healthcare_1**<jupyter_code>train_df['Healthcare_1'].head() train_df.loc[train_df['Healthcare_1'].isnull(), 'Healthcare_1'] = train_df['Healthcare_1'].median()<jupyter_output><empty_output><jupyter_text>### Построение новых признаков**Dummies**<jupyter_code>train_df['Ecology_2_bin'] = train_df['Ecology_2'].replace({'A':0, 'B':1}) train_df['Ecology_3_bin'] = train_df['Ecology_3'].replace({'A':0, 'B':1}) train_df['Shops_2_bin'] = train_df['Shops_2'].replace({'A':0, 'B':1})<jupyter_output><empty_output><jupyter_text>**DistrictSize, IsDistrictLarge**<jupyter_code>train_df['DistrictId'].value_counts() district_size = train_df['DistrictId'].value_counts().reset_index()\ .rename(columns={'index':'DistrictId', 'DistrictId':'DistrictSize'}) district_size.head() train_df = train_df.merge(district_size, on='DistrictId', how='left') train_df.head() (train_df['DistrictSize'] > 100).value_counts() train_df['IsDistrictLarge'] = (train_df['DistrictSize'] > 100).astype(int)<jupyter_output><empty_output><jupyter_text>**MedPriceByDistrict**<jupyter_code>med_price_by_district = train_df.groupby(['DistrictId', 'Rooms'], as_index=False).agg({'Price':'median'})\ .rename(columns={'Price':'MedPriceByDistrict'}) med_price_by_district.head() train_df = train_df.merge(med_price_by_district, on=['DistrictId', 'Rooms'], how='left') train_df.head() train_df['MedPriceByDistrict'].isnull().sum()<jupyter_output><empty_output><jupyter_text>*Пример переноса признака на test*<jupyter_code>test_df['DistrictId'] = test_df['DistrictId'].astype(str) test_df.merge(med_price_by_district, on=['DistrictId', 'Rooms'], how='left').info()<jupyter_output><class 'pandas.core.frame.DataFrame'> Int64Index: 5000 entries, 0 to 4999 Data columns (total 20 columns): Id 5000 non-null int64 DistrictId 5000 non-null object Rooms 5000 non-null float64 Square 5000 non-null float64 LifeSquare 3959 non-null float64 KitchenSquare 5000 non-null float64 Floor 5000 
non-null int64 HouseFloor 5000 non-null float64 HouseYear 5000 non-null int64 Ecology_1 5000 non-null float64 Ecology_2 5000 non-null object Ecology_3 5000 non-null object Social_1 5000 non-null int64 Social_2 5000 non-null int64 Social_3 5000 non-null int64 Healthcare_1 2623 non-null float64 Helthcare_2 5000 non-null int64 Shops_1 5000 non-null int64 Shops_2 5000 non-null object MedPriceByDistrict 4919 non-null float64 dtypes: float64(8), int64(8), obj[...]<jupyter_text>### Отбор признаков<jupyter_code>train_df.columns.tolist() feature_names = ['Rooms', 'Square', 'LifeSquare', 'KitchenSquare', 'Floor', 'HouseFloor', 'HouseYear', 'Ecology_1', 'Ecology_2_bin', 'Ecology_3_bin', 'Social_1', 'Social_2', 'Social_3', 'Healthcare_1', 'Helthcare_2', 'Shops_1', 'Shops_2_bin'] new_feature_names = ['IsDistrictLarge', 'MedPriceByDistrict'] target_name = 'Price'<jupyter_output><empty_output><jupyter_text>### Разбиение на train и val<jupyter_code>X = train_df[feature_names + new_feature_names] y = train_df[target_name] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, shuffle=True, random_state=21)<jupyter_output><empty_output><jupyter_text>### Построение модели**Обучение и оценка модели**<jupyter_code>rf_model = RandomForestRegressor(random_state=21) rf_model.fit(X_train, y_train)<jupyter_output><empty_output><jupyter_text>**Отложенная выборка**<jupyter_code>y_train_preds = rf_model.predict(X_train) y_test_preds = rf_model.predict(X_test) evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)<jupyter_output>Train R2: 0.952 Valid R2: 0.671 <jupyter_text>**Перекрёстная проверка**<jupyter_code>cv_score = cross_val_score(rf_model, X, y, scoring='r2', cv=KFold(n_splits=3, shuffle=True, random_state=21)) cv_score cv_score.mean()<jupyter_output><empty_output><jupyter_text>### XGBoost<jupyter_code>from xgboost import XGBRegressor xgb = xgboost.XGBRegressor(n_estimators=500, learning_rate=0.08, gamma=0, subsample=0.75, colsample_bytree=1, max_depth=7, objective ='reg:squarederror') xgb.fit(X_train, y_train) y_train_preds = xgb.predict(X_train) y_test_preds = xgb.predict(X_test) evaluate_preds(y_train, y_train_preds, y_test, y_test_preds) from sklearn.metrics import r2_score # A parameter grid for XGBoost params = {'min_child_weight':[4,5], 'gamma':[i/10.0 for i in range(3,6)], 'subsample':[i/10.0 for i in range(6,11)], 'max_depth': [2,3,4,7]} # Initialize XGB and GridSearch xgb = XGBRegressor(nthread=-1, objective ='reg:squarederror') grid = GridSearchCV(xgb, params) grid.fit(X_train,y_train) print(r2_score(y_test, grid.best_estimator_.predict(X_test))) y_train_preds = grid.best_estimator_.predict(X_train) y_test_preds = grid.best_estimator_.predict(X_test) evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)<jupyter_output>Train R2: 0.917 Test R2: 0.714 <jupyter_text>**Важность признаков**<jupyter_code>feature_importances = pd.DataFrame(zip(X_train.columns, rf_model.feature_importances_), columns=['feature_name', 'importance']) feature_importances.sort_values(by='importance', ascending=False)<jupyter_output><empty_output>
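<jupyter_text>The assignment also requires a CSV file with predicted prices for all 5,000 rows of test.csv (columns Id and Price). A minimal sketch of that final step is shown below; it assumes `test_df` has already been given the same cleaning, gap-filling and feature-construction steps as `train_df` (including the merge with `med_price_by_district`, the binary dummies and `DistrictSize`/`IsDistrictLarge`), which is not fully shown in this notebook.<jupyter_code># Assumes test_df already contains every column in feature_names + new_feature_names
test_X = test_df[feature_names + new_feature_names]

# Per the course recommendation, fill remaining gaps with statistics taken from train
test_X = test_X.fillna(X.median())

test_predictions = grid.best_estimator_.predict(test_X)

submission = pd.DataFrame({'Id': test_df['Id'], 'Price': test_predictions})
submission.to_csv('predictions.csv', index=False)
print(submission.shape)  # expected: (5000, 2)<jupyter_output><empty_output>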
permissive
/Web06.ipynb
gera09/Numpy_Matplotlib_Scikit-learn
25
<jupyter_start><jupyter_text>#### A function to calculate 3 statistical measures of an array<jupyter_code>def stat_inf(dat): mea=round(np.mean(dat),2) sd=round(np.std(dat),2) med=round(np.median(dat),2) return [[mea,sd,med]] #Import data from file fname='insurance.txt' feats=('age','sex','bmi','children','smoker','region','expenses') #Define type for each feature typ_f=('i2','U7','f2','i2','U3','U9','f4') data=np.loadtxt(fname,dtype={'names': feats,'formats': typ_f},skiprows=1) <jupyter_output><empty_output><jupyter_text>### Import and analyze the data<jupyter_code># Prepare for output ofname='HW5_results.txt' results=np.empty((0,3)) res=dict() #1. Mean, sd and median of age results=np.append(results,stat_inf(data['age']),axis=0) res['Age']=results[0] #2. Mean, sd and median of bmi results=np.append(results,stat_inf(data['bmi']),axis=0) res['BMI']=results[1] #3. Mean, sd and median of bmi by group of sex att=np.unique(data['sex']) # find unique value in sex column i=2 for val in att: bmi_g=data['bmi'][data['sex']==val] results=np.append(results,stat_inf(bmi_g),axis=0) rownam='BMI - '+val res[rownam]=results[i] i+=1 #4. Mean, sd and median of bmi for smoker and non smoker att=np.unique(data['smoker']) for val in att: bmi_g=data['bmi'][data['smoker']==val] results=np.append(results,stat_inf(bmi_g),axis=0) rownam='BMI - smoking - '+val res[rownam]=results[i] i+=1 #5. Mean, sd and median of bmi grouped by region att=np.unique(data['region']) for val in att: bmi_g=data['bmi'][data['region']==val] results=np.append(results,stat_inf(bmi_g),axis=0) rownam='BMI - region - '+val res[rownam]=results[i] i+=1 #6. Mean, sd and median of bmi of those who have more than 2 children bmi_g=data['bmi'][data['children']>2] results=np.append(results,stat_inf(bmi_g),axis=0) rownam='BMI - #children > 2' res[rownam]=results[i] # Extra. Mean, sd and median of bmi grouped by number of children att=np.unique(data['children']) for val in att: bmi_g=data['bmi'][data['children']==val] rownam='BMI - #children = '+str(val) res[rownam]=stat_inf(bmi_g) # Write results to a file np.savetxt(ofname,results,fmt='%.4f') # Print the result with names for key, value in res.items(): print(f'{key:30}{value}') <jupyter_output>Age [39.21 14.04 39. ] BMI [30.6875 6.1015625 30.40625 ] BMI - female [30.375 6.0390625 30.09375 ] BMI - male [30.953125 6.140625 30.703125] BMI - smoking - no [30.65625 6.0390625 30.34375 ] BMI - smoking - yes [30.71875 6.30859375 30.453125 ] BMI - region - northeast [29.1875 5.921875 28.90625 ] BMI - region - northwest [29.203125 5.12890625 28.90625 ] BMI - region - southeast [33.375 6.48046875 33.3125 ] BMI - region - southwest [30.59375 5.6796875 30.296875 ] BMI - #children > 2 [30.6875 5.76171875 30.34375 ] BMI - #children = 0 [[30.55, 6.04, 30.3]] BMI - #children = 1 [[30.62, 6.09, 30.0]] BMI - #children = 2 [[30.98, 6.5, 31.66]] BMI - #children = 3 [[30.69, 5.77, 30.5]] BMI - #children = 4 [[31.4, 4.53, 29.6]] BMI - #children = 5 [[29.[...]<jupyter_text>### How the following factors affect BMI 1. Smoking habit: People who do not smoke seem to have a slightly lower BMI than those who smoke, but the difference is not significant: the SD is around 6, while the absolute difference between the two group means is only about 0.06. 2. Region: People living in the north seem to have a lower BMI than people living in the south; the southeast has the highest mean BMI. 3. Children: As the number of children increases, BMI increases and peaks at 4 children.
After that, BMI drops to its lowest value.### What are the primary reasons for the top 20% of the expenses? In particular, sort the data by expenses, then compute the mean and standard deviation of BMI and the mode of smoker and region. How do these values differ from the remaining 80% of the population? <jupyter_code>ex_data=np.sort(data, order='expenses') # sort by expenses div_ind=int(np.floor(data.shape[0]*0.8)) # index to split the data 80/20 dat_be, dat_te=np.split(ex_data,[div_ind]) # split data into 2 groups print('\nFor 20% of the expenses, ') print('BMI factor has Mean =',np.mean(dat_te['bmi']), 'SD =',np.std(dat_te['bmi']), ' and median =',np.median(dat_te['bmi']) ) ele,cou=np.unique(dat_te['smoker'],return_counts=True) print('The frequency of smoker is') print(ele) print(cou) print('- The primary smoker status is',ele[np.argmax(cou)]) ele,cou=np.unique(dat_te['region'],return_counts=True) print('The frequency of region is') print(ele) print(cou) print('- The primary region status is',ele[np.argmax(cou)]) print('\nFor the rest of population, ') print('BMI factor has Mean =',np.mean(dat_be['bmi']), 'SD =',np.std(dat_be['bmi']), ' and median =',np.median(dat_be['bmi']) ) ele,cou=np.unique(dat_be['smoker'],return_counts=True) print('The frequency of smoker is') print(ele) print(cou) print('- The primary smoker status is',ele[np.argmax(cou)]) ele,cou=np.unique(dat_be['region'],return_counts=True) print('The frequency of region is') print(ele) print(cou) print('- The primary region status is',ele[np.argmax(cou)]) <jupyter_output> For 20% of the expenses, BMI factor has Mean = 32.22 SD = 5.72 and median = 32.06 The frequency of smoker is ['no' 'yes'] [ 60 208] - The primary smoker status is yes The frequency of region is ['northeast' 'northwest' 'southeast' 'southwest'] [67 59 88 54] - The primary region status is southeast For the rest of population, BMI factor has Mean = 30.28 SD = 6.125 and median = 29.9 The frequency of smoker is ['no' 'yes'] [1004 66] - The primary smoker status is no The frequency of region is ['northeast' 'northwest' 'southeast' 'southwest'] [257 266 276 271] - The primary region status is southeast
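A note on the 80/20 split above: sorting and slicing at an index works, but the same grouping can be expressed more directly with a percentile threshold and boolean masks. The sketch below assumes the same structured array `data` loaded from insurance.txt earlier; the helper names are illustrative, and because it thresholds on the expense value rather than on row position, ties at the cutoff may move a few rows between groups compared with the index split.

```python
import numpy as np

def split_by_expense(data, pct=80):
    """Return (bottom pct% of rows, top (100-pct)% of rows), split on the expense value."""
    threshold = np.percentile(data['expenses'], pct)
    return data[data['expenses'] < threshold], data[data['expenses'] >= threshold]

def group_summary(group):
    """Mean/SD/median of BMI plus the modal smoker status and region for one group."""
    bmi_stats = (round(float(np.mean(group['bmi'])), 2),
                 round(float(np.std(group['bmi'])), 2),
                 round(float(np.median(group['bmi'])), 2))
    modes = {}
    for col in ('smoker', 'region'):
        vals, counts = np.unique(group[col], return_counts=True)  # same mode trick as above
        modes[col] = vals[np.argmax(counts)]
    return bmi_stats, modes

# Example usage, mirroring the printout above:
# rest, top = split_by_expense(data)
# print('top 20%: ', group_summary(top))
# print('bottom 80%:', group_summary(rest))
```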
no_license
/.ipynb_checkpoints/HW5_P1_Team6-checkpoint.ipynb
duonghung86/MATH6380-Python-for-beginners
3
<jupyter_start><jupyter_text> Loops in PythonWelcome! This notebook will teach you about the loops in the Python Programming Language. By the end of this lab, you'll know how to use the loop statements in Python, including for loop, and while loop.Table of Contents Loops Range What is for loop? What is while loop? Quiz on Loops Estimated time needed: 20 min LoopsRangeSometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by loops. We will look at two types of loops, for loops and while loops. Before we discuss loops lets discuss the range object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate a sequence that contains three elements ordered from 0 to 2 we simply use the following command:<jupyter_code># Use the range range(3)<jupyter_output><empty_output><jupyter_text>What is for loop?The for loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list. Let's try to use a for loop to print all the years presented in the list dates:This can be done as follows:<jupyter_code># For loop example dates = [1982,1980,1973] N = len(dates) for i in range(N): print(dates[i]) <jupyter_output>1982 1980 1973 <jupyter_text>The code in the indent is executed N times, each time the value of i is increased by 1 for every execution. The statement executed is to print out the value in the list at index i as shown here:In this example we can print out a sequence of numbers from 0 to 7:<jupyter_code># Example of for loop for i in range(0, 8): print(i)<jupyter_output>0 1 2 3 4 5 6 7 <jupyter_text>In Python we can directly access the elements in the list as follows: <jupyter_code># Exmaple of for loop, loop through list for year in dates: print(year) <jupyter_output>1982 1980 1973 <jupyter_text>For each iteration, the value of the variable years behaves like the value of dates[i] in the first example:We can change the elements in a list:<jupyter_code># Use for loop to change the elements in list squares = ['red', 'yellow', 'green', 'purple', 'blue'] for i in range(0, 5): print("Before square ", i, 'is', squares[i]) squares[i] = 'weight' print("After square ", i, 'is', squares[i])<jupyter_output>Before square 0 is red After square 0 is weight Before square 1 is yellow After square 1 is weight Before square 2 is green After square 2 is weight Before square 3 is purple After square 3 is weight Before square 4 is blue After square 4 is weight <jupyter_text> We can access the index and the elements of a list as follows: <jupyter_code># Loop through the list and iterate on both index and element value squares=['red', 'yellow', 'green', 'purple', 'blue'] for i, square in enumerate(squares): print(i, square)<jupyter_output>0 red 1 yellow 2 green 3 purple 4 blue <jupyter_text>What is while loop?As you can see, the for loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The while loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value. Let’s say we would like to iterate through list dates and stop at the year 1973, then print out the number of iterations. 
This can be done with the following block of code:<jupyter_code># While Loop Example dates = [1982, 1980, 1973, 2000] i = 0 year = 0 while(year != 1973): year = dates[i] i = i + 1 print(year) print("It took ", i ,"repetitions to get out of loop.")<jupyter_output>1982 1980 1973 It took 3 repetitions to get out of loop. <jupyter_text>A while loop iterates merely until the condition in the argument is not met, as shown in the following figure:Quiz on LoopsWrite a for loop the prints out all the element between -5 and 5 using the range function.<jupyter_code># Write your code below and press Shift+Enter to execute for i in range(-5, 6): print(i)<jupyter_output>-5 -4 -3 -2 -1 0 1 2 3 4 5 <jupyter_text>Double-click __here__ for the solution. <!-- for i in range(-5, 6): print(i) -->Print the elements of the following list: Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop'] Make sure you follow Python conventions.<jupyter_code># Write your code below and press Shift+Enter to execute Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop'] for Genre in Genres: print(Genre)<jupyter_output>rock R&B Soundtrack R&B soul pop <jupyter_text>Double-click __here__ for the solution. <!-- Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop'] for Genre in Genres: print(Genre) -->Write a for loop that prints out the following list: squares=['red', 'yellow', 'green', 'purple', 'blue']<jupyter_code># Write your code below and press Shift+Enter to execute squares=['red', 'yellow', 'green', 'purple', 'blue'] for square in squares: print(square)<jupyter_output>red yellow green purple blue <jupyter_text>Double-click __here__ for the solution. <!-- squares=['red', 'yellow', 'green', 'purple', 'blue'] for square in squares: print(square) -->Write a while loop to display the values of the Rating of an album playlist stored in the list PlayListRatings. If the score is less than 6, exit the loop. The list PlayListRatings is given by: PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]<jupyter_code># Write your code below and press Shift+Enter to execute PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] i = 1 Rating = PlayListRatings[0] while(Rating >= 6): print(Rating) Rating = PlayListRatings[i] i = i + 1<jupyter_output>10 9.5 10 8 7.5 <jupyter_text>Double-click __here__ for the solution. <!-- PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] i = 1 Rating = PlayListRatings[0] while(Rating >= 6): print(Rating) Rating = PlayListRatings[i] i = i + 1 -->Write a while loop to copy the strings 'orange' of the list squares to the list new_squares. Stop and exit the loop if the value on the list is not 'orange':<jupyter_code># Write your code below and press Shift+Enter to execute squares = ['orange', 'orange', 'purple', 'blue ', 'orange'] new_squares = [] i = 0 while(squares[i] == 'orange'): new_squares.append(squares[i]) i = i + 1 print (new_squares)<jupyter_output>['orange', 'orange']
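One caveat about the two while-loop answers above: both index into the list without checking its length, so they only stop because a disqualifying element (a rating below 6, a non-'orange' string) happens to appear before the end of the list. A slightly more defensive version of the last exercise, sketched below, adds a bounds check so the loop also ends cleanly when the whole list is 'orange':

```python
squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
new_squares = []
i = 0
# The extra `i < len(squares)` test prevents an IndexError when every element is 'orange'.
while i < len(squares) and squares[i] == 'orange':
    new_squares.append(squares[i])
    i = i + 1
print(new_squares)   # ['orange', 'orange']
```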
permissive
/PY0101EN-3-2-Loops.ipynb
muhammadwaqasj/ibm_python_for_data_science
12
<jupyter_start><jupyter_text>##### Final Solution posted here: https://stackoverflow.com/questions/27969091/processing-an-image-of-a-table-to-get-data-from-it<jupyter_code>import cv2 import numpy as np import os # the list of images (tables) images = ['table1.png', 'table2.png', 'table3.png', 'table4.png', 'table5.png'] # the list of templates (used for template matching) templates = ['train1.png'] def remove_duplicates(lines): # remove duplicate lines (lines within 10 pixels of eachother) for x1, y1, x2, y2 in lines: for index, (x3, y3, x4, y4) in enumerate(lines): if y1 == y2 and y3 == y4: diff = abs(y1-y3) elif x1 == x2 and x3 == x4: diff = abs(x1-x3) else: diff = 0 if diff < 10 and diff is not 0: del lines[index] return lines def sort_line_list(lines): # sort lines into horizontal and vertical vertical = [] horizontal = [] for line in lines: if line[0] == line[2]: vertical.append(line) elif line[1] == line[3]: horizontal.append(line) vertical.sort() horizontal.sort(key=lambda x: x[1]) return horizontal, vertical def hough_transform_p(image, template, tableCnt): # open and process images img = cv2.imread('imgs/'+image) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray, 50, 150, apertureSize=3) # probabilistic hough transform lines = cv2.HoughLinesP(edges, 1, np.pi/180, 200, minLineLength=20, maxLineGap=999)[0].tolist() # remove duplicates lines = remove_duplicates(lines) # draw image for x1, y1, x2, y2 in lines: cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1) # sort lines into vertical & horizontal lists horizontal, vertical = sort_line_list(lines) # go through each horizontal line (aka row) rows = [] for i, h in enumerate(horizontal): if i < len(horizontal)-1: row = [] for j, v in enumerate(vertical): if i < len(horizontal)-1 and j < len(vertical)-1: # every cell before last cell # get width & height width = horizontal[i+1][1] - h[1] height = vertical[j+1][0] - v[0] else: # last cell, width = cell start to end of image # get width & height width = tW height = tH tW = width tH = height # get roi (region of interest) to find an x roi = img[h[1]:h[1]+width, v[0]:v[0]+height] # save image (for testing) dir = 'imgs/table%s' % (tableCnt+1) if not os.path.exists(dir): os.makedirs(dir) fn = '%s/roi_r%s-c%s.png' % (dir, i, j) cv2.imwrite(fn, roi) # if roi contains an x, add x to array, else add _ roi_gry = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(roi_gry, 127, 255, 0) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(contours) > 1: # there is an x for 2 or more contours row.append('x') else: # there is no x when len(contours) is <= 1 row.append('_') row.pop() rows.append(row) # save image (for testing) fn = os.path.splitext(image)[0] + '-hough_p.png' cv2.imwrite('imgs/'+fn, img) def process(): for i, img in enumerate(images): # perform probabilistic hough transform on each image hough_transform_p(img, templates[0], i) if __name__ == '__main__': process()<jupyter_output><empty_output><jupyter_text>##### Playing around with the Hough Transformation From the python cv2 page: http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html?highlight=detect%20lines<jupyter_code>import cv2 import numpy as np img = cv2.imread('images/sudoku-original.jpg') gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray,50,150,apertureSize = 3) lines = cv2.HoughLines(edges, 1, np.pi/180, 1) for rho,theta in lines[0]: a = np.cos(theta) b = np.sin(theta) x0 = a*rho y0 
= b*rho x1 = int(x0 + 1000*(-b)) y1 = int(y0 + 1000*(a)) x2 = int(x0 - 1000*(-b)) y2 = int(y0 - 1000*(a)) cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) cv2.imwrite('images/sudoku-original-gray.jpg', gray) cv2.imwrite('images/sudoku-original-edges.jpg', edges) cv2.imwrite('images/houghlines1.jpg', img) <jupyter_output><empty_output><jupyter_text>##### Playing around with the Hough Transformation 2 From this stack overflow question: https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv<jupyter_code>img = cv2.imread('images/voltest-original.png') gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray,50,150,apertureSize = 3) rho = 1 # distance resolution in pixels of the Hough grid (originally 1) theta = np.pi / 180 # angular resolution in radians of the Hough grid threshold = 200 # minimum number of votes (intersections in Hough grid cell) (originally 15) min_line_length = 55 # minimum number of pixels making up a line (originally 50) max_line_gap = 1 # maximum gap in pixels between connectable line segments (originally 20) line_image = np.copy(img) * 0 # creating a blank to draw lines on # Run Hough on edge detected image # Output "lines" is an array containing endpoints of detected line segments lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap) #for line in lines: # for x1,y1,x2,y2 in line: # cv2.line(line_image, (x1,y1), (x2,y2), (255,0,0), 5) # Draw the lines on the image lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0) cv2.imwrite('images/voltest-lines.jpg', lines_edges) cv2.imwrite('images/voltest-lines-separate.jpg', line_image)<jupyter_output><empty_output><jupyter_text>##### Best Hough Transformation Below is my approach to tuning parameters and then applying the hough transformation to the image<jupyter_code>def draw_image(img, lines, params): # Create a blank image to draw lines on line_image = np.copy(img) * 0 # Draw lines from hough transform for line in lines: for x1,y1,x2,y2 in line: cv2.line(line_image, (x1,y1), (x2,y2), (255,0,0), 5) # Output image to file filename = 'images/voltest3-lines-{}-{}-{}.jpg'.format(params[0], params[1], params[2]) cv2.imwrite(filename, line_image) def hough_transform(img, threshold, min_line_length, max_line_gap): # Convert to grayscale and detect edges gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray,50,150,apertureSize = 3) rho = 1 # distance resolution in pixels of the Hough grid (originally 1) theta = np.pi / 180 # angular resolution in radians of the Hough grid #threshold = 200 # minimum number of votes (intersections in Hough grid cell) (originally 15) #min_line_length = 55 # minimum number of pixels making up a line (originally 50) #max_line_gap = 1 # maximum gap in pixels between connectable line segments (originally 20) params = [threshold, min_line_length, max_line_gap] # for output to draw_image # Apply Hough Transformation lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap) # Draw Image & Output draw_image(img, lines, params) # Load original image img = cv2.imread('images/voltest3.png') # Tuning parameters thresholds = np.arange(200, 300, 100) min_line_lengths = np.arange(10, 26, 25) max_line_gaps = np.arange(0,15,5) # Run hough transform with all parameters for threshold_val in thresholds: for minline_val in min_line_lengths: for maxgap_val in max_line_gaps: hough_transform(img, threshold_val, minline_val, maxgap_val) <jupyter_output><empty_output><jupyter_text>consider countour 
detection with hough transform, and template matching Helpful Links: https://stackoverflow.com/questions/10196198/how-to-remove-convexity-defects-in-a-sudoku-square https://stackoverflow.com/questions/27969091/processing-an-image-of-a-table-to-get-data-from-it https://www.google.com/search?q=opencv+dave.jpg&rlz=1C1SQJL_enUS762US762&tbm=isch&tbo=u&source=univ&sa=X&ved=2ahUKEwiclLHf3d3cAhVowFQKHd7-BmwQsAR6BAgFEAE&biw=2000&bih=958#imgrc=VS-1FritL6V2MM http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html?highlight=detect%20lines https://stackoverflow.com/questions/27969091/processing-an-image-of-a-table-to-get-data-from-it https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv##### Contour Detection Below is my approach to using contour detection (instead of the hough transformation) to detect the tables in the image<jupyter_code>### Import libraries import numpy as np import cv2 # Prep image img = cv2.imread('images/voltest-original.png') imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Run contour analysis ret, thresh = cv2.threshold(imgray, 127, 255, 0) im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Sort contours contours = sorted(contours, key=cv2. contourArea, reverse = True) manual_tbl_countours = contours[1:4] # next two lines were pulled from stack question perimeters = [cv2.arcLength(contours[i],True) for i in range(len(contours))] listindex=[i for i in range(15) if perimeters[i]>perimeters[0]/2] # from here: https://www.pyimagesearch.com/2017/07/17/credit-card-ocr-with-opencv-and-python/ c = contours[1] (x, y, w, h) = cv2.boundingRect(c) # Show image imgcont = img.copy() #[cv2.drawContours(imgcont, [contours[i]], 0, (0,255,0), 5) for i in listindex] #plt.imshow(imgcont) cv2.drawContours(imgcont, contours, 10, (0,255,0), 3) cv2.imwrite("filename.jpg", imgcont) # from here: https://www.pyimagesearch.com/2017/07/17/credit-card-ocr-with-opencv-and-python/ c = contours[2] (x, y, w, h) = cv2.boundingRect(c) print(x,y,w,h)<jupyter_output><empty_output><jupyter_text>1. first, get sort the contours by size. 4 contours with largest area. 2. 
second, sort by left to right<jupyter_code># From here: https://stackoverflow.com/questions/28759253/how-to-crop-the-internal-area-of-a-contour def remove_duplicates(lines): # remove duplicate lines (lines within 10 pixels of eachother) for line in lines: for x1,y1,x2,y2 in line: #for x1, y1, x2, y2 in lines: for index, (x3, y3, x4, y4) in enumerate(lines): if y1 == y2 and y3 == y4: diff = abs(y1-y3) elif x1 == x2 and x3 == x4: diff = abs(x1-x3) else: diff = 0 if diff < 10 and diff is not 0: del lines[index] return lines def sort_line_list(lines): # sort lines into horizontal and vertical vertical = [] horizontal = [] for line in lines: if line[0] == line[2]: vertical.append(line) elif line[1] == line[3]: horizontal.append(line) vertical.sort() horizontal.sort(key=lambda x: x[1]) return horizontal, vertical # Below script gets the table outline for contour index 3 idx = 3 mask = np.zeros_like(img) # Create mask where white is what we want, black otherwise cv2.drawContours(mask, contours, idx, (255,255,255), -1) # Draw filled contour in mask out = np.zeros_like(img) # Extract out the object and place into output image out[mask == 255] = img[mask == 255] # Crop masked image #(x, y) = np.where(mask == 255) #print(np.where(mask == 255)) #(topx, topy) = (np.min(x), np.min(y)) #(bottomx, bottomy) = (np.max(x), np.max(y)) #out = out[topx:bottomx+1, topy:bottomy+1] # Convert to grayscale and detect edges gray = cv2.cvtColor(out, cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray, 50, 150, apertureSize = 3) # Apply Hough Transformation lines = cv2.HoughLinesP(edges, rho = 1, theta = np.pi / 180, threshold = 200, minLineLength = 20, maxLineGap = 50) # Create a blank image to draw lines on line_image = np.copy(img) * 0 # Draw lines from hough transform vertical = [] horizontal = [] for line in lines: for x1, y1, x2, y2 in line: cv2.line(out, (x1,y1), (x2,y2), (255,255,255), 5) if x1 == x2: vertical.append(line) print("vertical") print(y2 - y1) elif y1 == y2: horizontal.append(line) print("horizontal") print(x2 - x1) cv2.imwrite("output.jpg", out) # sort them #vertical = np.sort(vertical, axis=0) #print(vertical) #horizontal = np.sort(horizontal, axis = 1) # remove duplicates #lines = remove_duplicates(lines) # sort lines into vertical & horizontal lists #horizontal, vertical = sort_line_list(lines) # use my function defined above hough_transform(out, 200, 20, 50) try: import Image except ImportError: from PIL import Image import pytesseract # If you don't have tesseract executable in your PATH, include the following: pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract' # Define config parameters. # '-l eng' for using the English language # '--oem 1' for using LSTM OCR Engine config = ('-l eng --oem 1 --psm 3') # Read image from disk #im = cv2.imread(imPath, cv2.IMREAD_COLOR) img_crop = cv2.imread('output_cropped.jpg') # Run tesseract OCR on image text = pytesseract.image_to_string(img_crop, config=config) counts = text.split() print(counts)<jupyter_output>['14', '20', '10', '30', '36', '22', '1061', '1071', '535', '889', '815', '382', '116', '98', '48', '74', '64', '45', '1191', '1189', '593', '993', '915', '449'] <jupyter_text>New Workflow: 1. Detect large boxes using contours 2. Crop image (function) 3. Using hough transformation, detect table lines (function) 4. Recolor table lines to white, crop image 5. 
Run tesseract on resulting table, extract numbers<jupyter_code>### Import libraries import numpy as np import cv2 from operator import itemgetter, attrgetter try: import Image except ImportError: from PIL import Image import pytesseract # Set path to tesseract executable pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract' # Define config parameters. # '-l eng' for using the English language # '--oem 1' for using LSTM OCR Engine config = ('-l eng --oem 0 --psm 10000 -c tessedit_char_whitelist=0123456789') def GetContours(img): # Prep image imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 127, 255, 0) # Run contour analysis, sort by contour area (descending) im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=cv2.contourArea, reverse = True) return(contours) def CropImage(img, contour): (x, y, w, h) = cv2.boundingRect(contour) crop_img = img[y:y+h, x:x+w] return(crop_img) def TesseractText(img): text = pytesseract.image_to_string(img, config=config) counts = list(map(int, text.split())) # hmm, maybe here i shoudl be concatenating everything, instead of eventually # only returning the first object in the list return(counts) def ExtractCellVal(cells, img): vol = [] # for each cell, crop & extract text for cell in cells: (x, y, w, h) = cell[1], cell[2], cell[3], cell[4] crop_img = img[y:y+h, x:x+w] val = TesseractText(crop_img) vol.append(val[0]) return(vol) def SortPedCells(contours): # Get the bounding box of each contour contour_list = [] contour_len = len(contours) for contour in contours: (x, y, w, h) = cv2.boundingRect(contour) contour_list.append([contour, x, y, w, h]) contour_a = np.array(contour_list) # Sort by x coordinate, split by number of columns (in this case, 2) contour_a = contour_a[contour_a[:,1].argsort()] pedvol = contour_a[:6] schvol = contour_a[6:] # Sort top to bottom (descending) by y coordinate pedvol = pedvol[pedvol[:,2].argsort()] schvol = schvol[schvol[:,2].argsort()] return(pedvol, schvol) def AnalyzePedCrossingTable(img, pedtbl_contour): # Crop Image, get new contours crop_img = CropImage(img, pedtbl_contour[0]) pedvol_contours = GetContours(crop_img) pedvol_cells = pedvol_contours[2:14] pedvol_cells, schvol_cells = SortPedCells(pedvol_cells) pedvol = ExtractCellVal(pedvol_cells, crop_img) schvol = ExtractCellVal(schvol_cells, crop_img) return(dict([("Ped", pedvol), ("Sch", schvol)])) def GetPedData(img): ped_tbl_contours = GetContours(img)[5:9] ped_tbls = [] for ped_tbl_contour in ped_tbl_contours: (x, y, w, h) = cv2.boundingRect(ped_tbl_contour) ped_tbls.append([ped_tbl_contour, x, y, w, h]) ped_tbls = np.array(ped_tbls) ped_tbls = sorted(ped_tbls, key=itemgetter(1)) ped_tbls = sorted(ped_tbls, key=itemgetter(2)) ped_sch_extract = {} ped_sch_extract['SL'] = AnalyzePedCrossingTable(img, ped_tbls[:1][0]) ped_sch_extract['NL'] = AnalyzePedCrossingTable(img, ped_tbls[1:2][0]) ped_sch_extract['WL'] = AnalyzePedCrossingTable(img, ped_tbls[2:3][0]) ped_sch_extract['EL'] = AnalyzePedCrossingTable(img, ped_tbls[3:4][0]) # Format as final df ped_sch_data = [] for leg in ped_sch_extract: for pedtype in ped_sch_extract[leg]: ped_sch_dict = {} ped_sch_dict['xing_leg'] = leg ped_sch_dict['type'] = pedtype ped_sch_dict['volume'] = sum(ped_sch_extract[leg][pedtype]) ped_sch_data.append(ped_sch_dict) return(ped_sch_data) img = cv2.imread('images2/1ST.FRESNO.160608-MAN.png') ManualTC['Pedestrian'] = GetPedData(img) 
<jupyter_output>[{'xing_leg': 'SL', 'type': 'Ped', 'volume': 73}, {'xing_leg': 'SL', 'type': 'Sch', 'volume': 29}, {'xing_leg': 'NL', 'type': 'Ped', 'volume': 0}, {'xing_leg': 'NL', 'type': 'Sch', 'volume': 0}, {'xing_leg': 'WL', 'type': 'Ped', 'volume': 32}, {'xing_leg': 'WL', 'type': 'Sch', 'volume': 1}, {'xing_leg': 'EL', 'type': 'Ped', 'volume': 35}, {'xing_leg': 'EL', 'type': 'Sch', 'volume': 9}] <jupyter_text>##### Convert PDF Images to PNG for Processing with CV2 Before doing conversions from PDF to PNG, make sure to install ImageMagick (along with wand) as well as Ghostscript. Add more detail.<jupyter_code>from wand.image import Image, Color import glob, os # Grab all PDFs within the folder files = glob.glob('C:/Users/Tim/Documents/GitHub/vehicle-vol-pdf-scrape/HoughTranformTest/images2/*.pdf') file_names = [os.path.abspath(file) for file in files] for file_name in file_names: fin, file_extension = os.path.splitext(file_name) fout = fin + '.png' with Image(filename=file_name, resolution=300) as img: img.background_color = Color("white") img.alpha_channel = 'remove' img.save(filename=fout) <jupyter_output><empty_output><jupyter_text>##### Testing Stuctural Similarity Index (SSIM) <jupyter_code># import the necessary packages from skimage.measure import compare_ssim as ssim from skimage.measure import compare_mse as mse import matplotlib.pyplot as plt import numpy as np import cv2 #hist1 = cv2.calcHist([image],[0],None,[256],[0,256]) #hist2 = cv2.calcHist([image1],[0],None,[256],[0,256]) #compare = cv2.compareHist(hist1,hist2,CV_COMP_CORREL) # Import images imageA = cv2.imread("images2/1ST.ALAMEDA.140409LATESHIFT-MAN.png") #imageA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY) histA = cv2.calcHist([imageA],[0],None,[256],[0,256]) # Grab all PDFs within the folder files = glob.glob('C:/Users/Tim/Documents/GitHub/vehicle-vol-pdf-scrape/HoughTranformTest/images2/*.png') file_names = [os.path.abspath(file) for file in files] for file_name in file_names: imageB = cv2.imread(file_name) # Print filename print(file_name) # convert the images to grayscale #imageB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY) histB = cv2.calcHist([imageB],[0],None,[256],[0,256]) # compute SSIM #s = ssim(imageA, imageB) #m = mse(imageA, imageB) #print(s, m) compare = cv2.compareHist(histA, histB, 0) print(compare)<jupyter_output>C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.ALAMEDA.140409LATESHIFT-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.ALAMEDAEARLYSHIFT.140227-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.BEAUDRY.150203-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.CUMMINGS.120614-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.DACOTAH.120615-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.DACOTAH.160602-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.EVERGREEN.160519-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.FRESNO.160608-MAN.png 1.0 C:\Users\Tim\Documents\GitHub\vehicle-vol-pdf-scrape\HoughTranformTest\images2\1ST.HILL.160202-MAN-[...]
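The ordering plan described above ("first, sort the contours by size... second, sort by left to right") can be factored into a small helper; the name `order_table_contours` below is just illustrative. The sketch assumes `contours` is a list returned by cv2.findContours on one of the table images, `keep=4` mirrors the "4 contours with largest area" mentioned above, and the reading order is derived from each contour's bounding-box origin.

```python
import cv2

def order_table_contours(contours, keep=4):
    """Keep the `keep` largest contours, then return them in reading order."""
    # Step 1: largest areas first -- table outlines dominate small text blobs.
    largest = sorted(contours, key=cv2.contourArea, reverse=True)[:keep]
    # Step 2: sort by bounding-box y (top-to-bottom), then x (left-to-right).
    return sorted(largest, key=lambda c: (cv2.boundingRect(c)[1], cv2.boundingRect(c)[0]))

# e.g. tables = order_table_contours(GetContours(img)), using the GetContours helper above
```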
no_license
/HoughTranformTest/Untitled.ipynb
black-tea/vehicle-vol-pdf-scrape
9
<jupyter_start><jupyter_text># Step 4. Analyses and visualizations inputs: - `preprocessed_merfish_ad_mouse_rotated.hdf5` - `clustering_embedding_Xu2_5_R0_Xu2_5_R1_Xu2_6_R0_Xu2_6_R1_...tsv.gz` - `palette_admouse.json` outputs: - cluster visualizations - gene level analyses<jupyter_code>import json import h5py import tqdm import datetime import time import pandas as pd import numpy as np import collections from scipy import stats import datashader as ds import colorcet import sys sys.path.insert(0, '/cndd2/fangming/projects/SingleCellRoutines') import utils import powerplot from __init__plots import * from _powerplot_vizgen_merfish import * today = datetime.date.today() analysis_name = "AD_mouse" figures = '../figures/{}_{}_{{}}'.format(today, analysis_name) results = '../data/summary_tables_admouse/{}_{}_{{}}'.format(today, analysis_name) figures, results !mkdir -p ../figures<jupyter_output><empty_output><jupyter_text># read in processed data - cell metadata - cell gene matrix (normalized) - cell clusters and umap - palette and other metadata<jupyter_code># palette as json file = '../data/summary_tables_admouse/palette_admouse.json' with open(file, 'r') as f: palette = json.load(f) palette !ls ../data/summary_tables_admouse/*.tsv.gz f = '../data/summary_tables_admouse/clustering_embedding_Xu2_5_R0_Xu2_5_R1_Xu2_6_R0_Xu2_6_R1_2021-07-02.tsv.gz' clsts = pd.read_csv(f, sep='\t').set_index('cell') clsts['cluster_cat'] = pd.Categorical(clsts['cluster']) #.astype print(clsts.shape) clsts.head() clsts = clsts[clsts['sample'] == 'Xulab_2_5_region_1'] print(clsts.shape) input = '../data/summary_tables_admouse/processed_merfish_ad_mouse_rotated.hdf5' samples = [ # 'Xulab_2_5_region_0', 'Xulab_2_5_region_1', # 'Xulab_2_6_region_0', # 'Xulab_2_6_region_1', ] samples_annot = { 'Xulab_2_5_region_0': 'WT 2-5', 'Xulab_2_5_region_1': '5xFAD 2-5', 'Xulab_2_6_region_0': 'WT 2-6', 'Xulab_2_6_region_1': '5xFAD 2-6', } gmat = [] meta = [] for sample in samples: _gmat = pd.read_hdf(input, 'mat_'+sample) gmat.append(_gmat) _meta = pd.read_hdf(input, 'meta_'+sample) _meta['sample'] = sample meta.append(_meta) gmat = pd.concat(gmat) meta = pd.concat(meta) # informations thedata = clsts.drop('sample', axis=1).join(meta) #.join(clsts) genes = gmat.columns thedatagmat = thedata.join(gmat) print(len(genes), gmat.shape, thedata.shape)<jupyter_output>150 (107368, 150) (107368, 17) <jupyter_text>### Run gene viz<jupyter_code># run genes view selected_genes = ['Slc17a7'] for gene in tqdm.tqdm(selected_genes): # insitu x, y, hue = 'x', 'y', gene output = figures.format("insitu_"+gene+".pdf") fig_plot_gene_insitu_routine( thedatagmat, samples, x, y, hue, samples_annot=samples_annot, nx=2, ny=2, figsize=(9*2,6*2), # close=True, # output='', output=output, close=False, ) print(output) # umap x, y, hue = 'umap_1', 'umap_2', gene output = figures.format("umap_"+gene+".pdf") fig_plot_gene_umap_routine( thedatagmat, x, y, hue, output=output, close=False, # close=True, ) print(output) break<jupyter_output> 0%| | 0/1 [00:00<?, ?it/s]<jupyter_text>### Run cluster viz (combined)<jupyter_code>clusters = np.sort(thedata['cluster_cat'].cat.categories.values) clstcolors_obj = powerplot.CategoricalColors(clusters) palette_clst = clstcolors_obj.to_dict(to_hex=True, output='../data/summary_tables_admouse/palette_clustering_Xu2_5_R0_Xu2_5_R1_Xu2_6_R0_Xu2_6_R1_2021-07-02.json') palette_clst # agg data for each sample x, y, hue = 'x', 'y', 'cluster_cat' suptitle = 'colored by cluster' output = figures.format("insitu_allclsts"+".pdf") close = 
False fig_plot_cluster_insitu_routine( thedata, samples, x, y, hue, clstcolors_obj, samples_annot=samples_annot, suptitle=suptitle, nx=2, ny=2, figsize=(9*2,6*2), close=close, output=output, ) # plot all clusters UMAP x, y, hue = 'umap_1', 'umap_2', 'cluster_cat' title = 'colored by cluster' output = figures.format("umap_allclsts"+".pdf") close = False fig_plot_cluster_umap_routine( thedata, x, y, hue, clstcolors_obj, title=title, close=close, output=output, ) cat_col = 'sample_cat' thedata[cat_col] = pd.Categorical(thedata['sample']) categories = np.sort(thedata[cat_col].cat.categories.values) clstcolors_obj = powerplot.CategoricalColors(categories, [palette[catg] for catg in categories]) # agg data for each sample x, y, hue = 'x', 'y', cat_col suptitle = 'colored by sample' output = figures.format("insitu_allclsts_"+cat_col+".pdf") close = False fig_plot_cluster_insitu_routine( thedata, samples, x, y, hue, clstcolors_obj, samples_annot=samples_annot, suptitle=suptitle, nx=2, ny=2, figsize=(9*2,6*2), close=close, output=output, ) # plot all clusters UMAP x, y, hue = 'umap_1', 'umap_2', cat_col title = 'colored by sample' output = figures.format("umap_allclsts_"+cat_col+".pdf") close = False fig_plot_cluster_umap_routine( thedata, x, y, hue, clstcolors_obj, title=title, close=close, output=output, ) # # selected clusters (preserve colors) # clstcolors_obj = powerplot.CategoricalColors( # clusters) # colors = clstcolors_obj.colors # selected_clusters = ['C15', 'C16', 'C19'] # selected_colors = [ # color if label in selected_clusters else 'lightgray' # for color, label in zip(colors, clusters) # ] # selected_clstcolors_obj = powerplot.CategoricalColors( # clusters, selected_colors, # ) # selected clusters (not preserve colors) colors = ['lightgray']*len(clusters) selected_clusters = ['C15', 'C16', 'C19'] selected_colors = powerplot.CategoricalColors(selected_clusters).colors for i, clst in enumerate(selected_clusters): idx = np.where(clusters==clst)[0][0] colors[idx] = selected_colors[i] selected_clstcolors_obj = powerplot.CategoricalColors(clusters, colors) # agg data for each sample x, y, hue = 'x', 'y', 'cluster_cat' suptitle = 'colored by hippo cluster' output = figures.format("insitu_hippoclsts"+".pdf") close = False fig_plot_cluster_insitu_routine( thedata, samples, x, y, hue, selected_clstcolors_obj, suptitle=suptitle, samples_annot=samples_annot, nx=2, ny=2, figsize=(9*2,6*2), close=close, output=output, ) # plot all clusters UMAP x, y, hue = 'umap_1', 'umap_2', 'cluster_cat' title = 'colored by hippo cluster' output = figures.format("umap_hippoclsts"+".pdf") close = False fig_plot_cluster_umap_routine( thedata, x, y, hue, selected_clstcolors_obj, title=title, close=close, output=output, )<jupyter_output>/cndd2/fangming/venvs/routine/lib/python3.8/site-packages/matplotlib/colors.py:235: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray. 
c = np.array(c) <jupyter_text># barplots -- number of cells for each clusters## all gene analyses - WT vs 5xFAD (2), cell types (N), replicates (2) - get an heatmap - Ngenes * 4Ncelltypes <jupyter_code># select hemi-brain sections filter_cond = ( ((thedatagmat['sample'] == 'Xulab_2_5_region_0') & (thedatagmat['x'] > 0)) | ((thedatagmat['sample'] == 'Xulab_2_5_region_1') & (thedatagmat['x'] < 0)) | ((thedatagmat['sample'] == 'Xulab_2_6_region_0') & (thedatagmat['x'] < 0)) | ((thedatagmat['sample'] == 'Xulab_2_6_region_1') & (thedatagmat['x'] < 0)) ) # selecteddata thedatagmat['sample_name'] = thedatagmat['sample'].apply(lambda x: samples_annot[x]) selecteddata = thedatagmat[filter_cond] # mean, std, n, zscore gmat_mean = selecteddata.groupby(['cluster', 'sample_name']).mean()[genes] gmat_std = selecteddata.groupby(['cluster', 'sample_name']).std()[genes] gmat_n = selecteddata.groupby(['cluster', 'sample_name']).count()[genes] gmat_meanz = (gmat_mean - gmat_mean.mean())/gmat_mean.std() # celltypes celltypes = gmat_mean.index.levels[0].values print(thedatagmat.shape, selecteddata.shape) print(gmat_mean.shape, gmat_std.shape, gmat_n.shape, gmat_meanz.shape) # mean diff and statistical significant diff rescale = np.log(10)/np.log(2) ctrl, case = 'WT 2-5', '5xFAD 2-5' query_ctrl = 'sample_name == "{}"'.format(ctrl) query_case = 'sample_name == "{}"'.format(case) diff1 = (gmat_mean.query(query_case).droplevel(level=1) - gmat_mean.query(query_ctrl).droplevel(level=1) ).loc[celltypes]*rescale diff1['slice'] = "2-5" diff1 = diff1.set_index('slice', append=True) ctrl, case = 'WT 2-6', '5xFAD 2-6' query_ctrl = 'sample_name == "{}"'.format(ctrl) query_case = 'sample_name == "{}"'.format(case) diff2 = (gmat_mean.query(query_case).droplevel(level=1) - gmat_mean.query(query_ctrl).droplevel(level=1) ).loc[celltypes]*rescale diff2['slice'] = "2-6" diff2 = diff2.set_index('slice', append=True) diffmat = pd.concat([diff1, diff2]).sort_index(level=0) print(diffmat.shape) # sig diff alpha = 0.05 ctrl, case = 'WT 2-5', '5xFAD 2-5' query_ctrl = 'sample_name == "{}"'.format(ctrl) query_case = 'sample_name == "{}"'.format(case) t, p = stats.ttest_ind_from_stats( gmat_mean.query(query_ctrl).loc[celltypes].values, gmat_std.query(query_ctrl).loc[celltypes].values, gmat_n.query(query_ctrl).loc[celltypes].values, gmat_mean.query(query_case).loc[celltypes].values, gmat_std.query(query_case).loc[celltypes].values, gmat_n.query(query_case).loc[celltypes].values, ) p[np.isnan(p)] = 1 q = utils.get_fdr(p.reshape(-1)).reshape(p.shape) q = pd.DataFrame(q, index=celltypes, columns=genes) q['slice'] = "2-5" q = q.set_index('slice', append=True) rej = q<alpha q1 = q rej1 = rej ctrl, case = 'WT 2-6', '5xFAD 2-6' query_ctrl = 'sample_name == "{}"'.format(ctrl) query_case = 'sample_name == "{}"'.format(case) t, p = stats.ttest_ind_from_stats( gmat_mean.query(query_ctrl).loc[celltypes].values, gmat_std.query(query_ctrl).loc[celltypes].values, gmat_n.query(query_ctrl).loc[celltypes].values, gmat_mean.query(query_case).loc[celltypes].values, gmat_std.query(query_case).loc[celltypes].values, gmat_n.query(query_case).loc[celltypes].values, ) p[np.isnan(p)] = 1 q = utils.get_fdr(p.reshape(-1)).reshape(p.shape) q = pd.DataFrame(q, index=celltypes, columns=genes) q['slice'] = "2-6" q = q.set_index('slice', append=True) rej = q<alpha q2 = q rej2 = rej qmat = pd.concat([q1, q2]).sort_index(level=0) rejmat = pd.concat([rej1, rej2]).sort_index(level=0) # rejmat output = figures.format("gene_by_clusters_separate_samples.pdf") heatmat = 
gmat_meanz.T heatmat_reduced = heatmat.mean(level=0, axis=1) nrow, ncol = heatmat.shape # order1 _, rowidx, colidx_reduced = utils.diag_matrix(heatmat_reduced.values, threshold=1) colidx = np.hstack([4*col+np.array([0,1,2,3]) for col in colidx_reduced]) # order2 row_order = utils.get_order_from_hierarchy(heatmat) col_order_reduced = utils.get_order_from_hierarchy(heatmat_reduced.T) # by cluster; collapse slices col_order = np.hstack([4*col+np.array([0,1,2,3]) for col in col_order_reduced]) # order3 (curated col_order_reduced) _, rowidx_v3, _ = utils.diag_matrix_rows(heatmat_reduced.iloc[:,col_order_reduced].values) fig, ax = plt.subplots(figsize=(20, 30)) sns.heatmap( # heatmat.iloc[row_order, col_order], # heatmat.iloc[rowidx, colidx], heatmat.iloc[rowidx_v3, col_order], vmax=3, vmin=-3, center=0, xticklabels=True, yticklabels=True, cmap=colorcet.coolwarm, ax=ax, cbar_kws=dict(shrink=0.2, label='gene expression in zscore across cell types and samples'), ) ax.set_yticklabels(ax.get_yticklabels(), fontsize=10) ax.tick_params(axis=u'both', which=u'both',length=0) xtl = [tl.get_text() for tl in ax.get_xticklabels()] xtl_part1 = [tl.split('-')[0] for tl in xtl] xtl_part2 = ['-'.join(tl.split('-')[1:]) for tl in xtl] xtl_simp = list(utils.dedup_array_elements(xtl_part1)) ax.set_xticklabels(xtl_simp) for i in range(4): ax.annotate(xtl_part2[i], (i+0.5, 0), (2*i, -2), arrowprops=dict(arrowstyle='-', color='black'), va='bottom', ha='center', rotation=90, ) ax.vlines(np.arange(0, ncol, 4)[1:], 0, nrow, color='black', linewidth=1, clip_on=False) ax.vlines(np.arange(2, ncol, 4), 0, nrow, color='gray', linewidth=0.5, clip_on=False) utils.savefig(fig, output) print(output) plt.show() cluster_orders = col_order_reduced heatmat = gmat_meanz.loc[selected_clusters].T heatmat_reduced = heatmat.mean(level=0, axis=1) nrow, ncol = heatmat.shape # order1 _, rowidx, colidx_reduced = utils.diag_matrix(heatmat_reduced.values, threshold=1) colidx = np.hstack([4*col+np.array([0,1,2,3]) for col in colidx_reduced]) # order2 row_order = utils.get_order_from_hierarchy(heatmat) col_order_reduced = utils.get_order_from_hierarchy(heatmat_reduced.T) # by cluster; collapse slices col_order = np.hstack([4*col+np.array([0,1,2,3]) for col in col_order_reduced]) # order3 (curated col_order_reduced) _, rowidx_v3, _ = utils.diag_matrix_rows(heatmat_reduced.iloc[:,col_order_reduced].values) fig, ax = plt.subplots(figsize=(4, 30)) sns.heatmap( # heatmat.iloc[row_order, col_order], # heatmat.iloc[rowidx, colidx], heatmat.iloc[rowidx_v3, col_order], vmax=3, vmin=-3, center=0, xticklabels=True, cmap=colorcet.coolwarm, cbar_kws=dict(shrink=0.2, label='gene expression in zscore across cell clusters and samples'), ax=ax, ) ax.set_yticklabels(ax.get_yticklabels(), fontsize=10) ax.tick_params(axis=u'both', which=u'both',length=0) xtl = [tl.get_text() for tl in ax.get_xticklabels()] xtl_part1 = [tl.split('-')[0] for tl in xtl] xtl_part2 = ['-'.join(tl.split('-')[1:]) for tl in xtl] xtl_simp = list(utils.dedup_array_elements(xtl_part1)) ax.set_xticklabels(xtl_simp, fontsize=12) for i in range(4): ax.annotate(xtl_part2[i], (i+0.5, 0), (2*i, -2), arrowprops=dict(arrowstyle='-', color='black'), va='bottom', ha='center', rotation=90, fontsize=12, ) ax.vlines(np.arange(0, ncol, 4)[1:], 0, nrow, color='black', linewidth=1, clip_on=False) ax.vlines(np.arange(2, ncol, 4), 0, nrow, color='gray', linewidth=0.5, clip_on=False) plt.show() output = figures.format("gene_diff_5xFAD_vs_WT.pdf") heatmat = (rejmat*diffmat).T nrow, ncol = heatmat.shape 
heatmat_reduced = heatmat.mean(level=0, axis=1) # order1 _, rowidx, colidx_reduced = utils.diag_matrix(heatmat_reduced.values, threshold=1) colidx = np.hstack([2*col+np.array([0,1,]) for col in colidx_reduced]) # order2 row_order = utils.get_order_from_hierarchy(heatmat) col_order_reduced = utils.get_order_from_hierarchy(heatmat_reduced.T) # by cluster; collapse slices col_order = np.hstack([2*col+np.array([0,1,]) for col in col_order_reduced]) # order3 (curated col_order_reduced) _, rowidx_v3, _ = utils.diag_matrix_rows( heatmat_reduced.iloc[:,col_order_reduced].abs().values) _, rowidx_v3, _ = utils.diag_matrix_rows( heatmat_reduced.iloc[:,cluster_orders].abs().values) # order4 colidx_v4 = np.hstack([2*col+np.array([0,1,]) for col in cluster_orders]) # order5 rowidx_v5 = np.argsort(heatmat.sum(axis=1)) fig, ax = plt.subplots(figsize=(15, 30)) sns.heatmap( heatmat.iloc[rowidx_v5, colidx_v4], # heatmat.iloc[row_order, colidx_v4], # heatmat.iloc[rowidx_v3, colidx_v4], # heatmat.iloc[row_order, col_order], # heatmat.iloc[rowidx, colidx], # heatmat.iloc[rowidx_v3, col_order], # heatmat, vmax=1, vmin=-1, center=0, xticklabels=True, yticklabels=True, cmap=colorcet.coolwarm, ax=ax, cbar_kws=dict(shrink=0.2, label='log2(Fold change) 5xFAD vs. WT \n(only elements with FDR<0.05 are shown)'), ) ax.set_yticklabels(ax.get_yticklabels(), fontsize=10) ax.tick_params(axis=u'both', which=u'both',length=0) xtl = [tl.get_text() for tl in ax.get_xticklabels()] xtl_part1 = [tl.split('-')[0] for tl in xtl] xtl_part2 = ['-'.join(tl.split('-')[1:]) for tl in xtl] xtl_simp = list(utils.dedup_array_elements(xtl_part1)) ax.set_xticklabels(xtl_simp) for i in range(2): ax.annotate(xtl_part2[i], (i+0.5, 0), (2*i, -2), arrowprops=dict(arrowstyle='-', color='black'), va='bottom', ha='center', rotation=90, ) ax.set_xlabel('cell clusters and slices') ax.vlines(np.arange(0, ncol, 2)[1:], 0, nrow, color='black', linewidth=1, clip_on=False) utils.savefig(fig, output) print(output) plt.show() output = figures.format("gene_diff_hippo.pdf") heatmat = (rejmat*diffmat).loc[selected_clusters].T nrow, ncol = heatmat.shape heatmat_reduced = heatmat.mean(level=0, axis=1) # order1 _, rowidx, colidx_reduced = utils.diag_matrix(heatmat_reduced.values, threshold=1) colidx = np.hstack([2*col+np.array([0,1,]) for col in colidx_reduced]) # order2 row_order = utils.get_order_from_hierarchy(heatmat) col_order_reduced = utils.get_order_from_hierarchy(heatmat_reduced.T) # by cluster; collapse slices col_order = np.hstack([2*col+np.array([0,1,]) for col in col_order_reduced]) # order3 (curated col_order_reduced) _, rowidx_v3, _ = utils.diag_matrix_rows( heatmat_reduced.iloc[:,col_order_reduced].abs().values) # # order4 # colidx_v4 = np.hstack([2*col+np.array([0,1,]) for col in cluster_orders]) # order5 rowidx_v5 = np.argsort(heatmat.sum(axis=1)) fig, ax = plt.subplots(figsize=(4, 30)) sns.heatmap( heatmat.iloc[rowidx_v5, col_order], # heatmat.iloc[rowidx_v5, colidx_v4], # heatmat.iloc[row_order, colidx_v4], # heatmat.iloc[rowidx_v3, colidx_v4], # heatmat.iloc[row_order, col_order], # heatmat.iloc[rowidx, colidx], # heatmat.iloc[rowidx_v3, col_order], # heatmat, vmax=1, vmin=-1, center=0, xticklabels=True, yticklabels=True, cmap=colorcet.coolwarm, ax=ax, cbar_kws=dict(shrink=0.2, label='log2(Fold change) 5xFAD vs. 
WT \n(only elements with FDR<0.05 are shown)'), ) ax.set_yticklabels(ax.get_yticklabels(), fontsize=10) ax.tick_params(axis=u'both', which=u'both',length=0) xtl = [tl.get_text() for tl in ax.get_xticklabels()] xtl_part1 = [tl.split('-')[0] for tl in xtl] xtl_part2 = ['-'.join(tl.split('-')[1:]) for tl in xtl] xtl_simp = list(utils.dedup_array_elements(xtl_part1)) ax.set_xticklabels(xtl_simp) for i in range(2): ax.annotate(xtl_part2[i], (i+0.5, 0), (2*i, -2), arrowprops=dict(arrowstyle='-', color='black'), va='bottom', ha='center', rotation=90, ) ax.set_xlabel('cell clusters and slices') ax.vlines(np.arange(0, ncol, 2)[1:], 0, nrow, color='black', linewidth=1, clip_on=False) utils.savefig(fig, output) print(output) plt.show()<jupyter_output>../figures/2021-07-02_AD_mouse_gene_diff_hippo.pdf
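`utils.get_fdr` above is a helper from the authors' SingleCellRoutines package, so its exact implementation is not shown in this notebook. Assuming it performs a Benjamini-Hochberg correction (the usual choice for this kind of per-gene, per-cluster testing), the same t-test-plus-FDR step can be sketched with statsmodels, which is not imported above; the function name here is illustrative, and its inputs correspond to the `gmat_mean`, `gmat_std`, and `gmat_n` slices passed to `ttest_ind_from_stats` in the cell above.

```python
import numpy as np
from scipy import stats
from statsmodels.stats.multitest import multipletests

def ttest_with_fdr(mean_a, std_a, n_a, mean_b, std_b, n_b, alpha=0.05):
    """Two-sample t-test from summary statistics, then BH-FDR across all gene/cluster cells."""
    t, p = stats.ttest_ind_from_stats(mean_a, std_a, n_a, mean_b, std_b, n_b)
    p = np.asarray(p)
    p[np.isnan(p)] = 1            # undefined tests (e.g. empty groups) count as non-significant
    reject, q, _, _ = multipletests(p.ravel(), alpha=alpha, method='fdr_bh')
    return reject.reshape(p.shape), q.reshape(p.shape)
```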
non_permissive
/analysis_round2/unorganized/4.analyses_and_visualizations-debug.ipynb
FangmingXie/merfish_vizgen
5
<jupyter_start><jupyter_text># IBM Developer Skills Network# Reading data from CSV file<jupyter_code>import pandas as pd url = 'https://raw.githubusercontent.com/sonpn82/Python-for-data-science/master/addresses.csv' df = pd.read_csv(url) df<jupyter_output><empty_output><jupyter_text>## Add column name to dataframe<jupyter_code>df.columns =['First Name', 'Last Name', 'Location ', 'City','State','Area Code'] df<jupyter_output><empty_output><jupyter_text>## Select a single column<jupyter_code>df['First Name']<jupyter_output><empty_output><jupyter_text>## Select multiple columns<jupyter_code>df = df[['First Name', 'Last Name', 'Location ', 'City','State','Area Code']] df<jupyter_output><empty_output><jupyter_text>## Select row using iloc and loc<jupyter_code>df.loc[0] df.loc[[0,1,2], 'First Name'] # use column name df.iloc[[0,1,2], 0] # use column index<jupyter_output><empty_output><jupyter_text>## Transform function in pandas<jupyter_code>import pandas as pd import numpy as np # creating a dataframe df=pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c']) df df = df.transform(func= lambda x : x + 10) # using lambda function with input x, output x + 10 df result = df.transform(func= ['sqrt']) result<jupyter_output><empty_output><jupyter_text># JSON file Format<jupyter_code>import json person = { 'first_name' : 'Mark', 'last_name' : 'abc', 'age' : 27, 'address': { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" } } path=os.path.join(os.getcwd(),'person.json') # path to save the json file path with open(path, 'w') as f: json.dump(person, f) # using dump to write file # Serializing json json_object = json.dumps(person, indent = 4) # convert dictionary to json object with open(path, 'w') as outfile: outfile.write(json_object) # write to file print(json_object)<jupyter_output>{ "first_name": "Mark", "last_name": "abc", "age": 27, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" } } <jupyter_text>## Reading json into a file<jupyter_code>import json with open(path, 'r') as openfile: json_object = json.load(openfile) print(json_object)<jupyter_output>{'first_name': 'Mark', 'last_name': 'abc', 'age': 27, 'address': {'streetAddress': '21 2nd Street', 'city': 'New York', 'state': 'NY', 'postalCode': '10021-3100'}} <jupyter_text># XLSX file format## Reading xlsx file<jupyter_code>import pandas as pd import urllib.request url = 'https://github.com/sonpn82/Python-for-data-science/blob/master/file_example_XLSX_10.xlsx?raw=true' # link to excel file urllib.request.urlretrieve(url, 'sample.xlsx') # download the file df = pd.read_excel("sample.xlsx") # read to dataframe df.head()<jupyter_output><empty_output><jupyter_text># XML file Format<jupyter_code>import xml.etree.cElementTree as ET # create the file structure employee = ET.Element('employee') details = ET.SubElement(employee, 'details') first = ET.SubElement(details, 'firstname') second = ET.SubElement(details, 'lastname') third = ET.SubElement(details, 'age') first.text = 'Shiv' second.text = 'Mishra' third.text = '23' # create a new XML file with the results mydata1 = ET.ElementTree(employee) # myfile = open("items2.xml", "wb") # myfile.write(mydata) with open("new_sample.xml", "wb") as files: mydata1.write(files) import pandas as pd import xml.etree.ElementTree as etree url = 'https://raw.githubusercontent.com/sonpn82/Python-for-data-science/master/Sample-employee-XML-file.xml' urllib.request.urlretrieve(url, 
'Sample-employee-XML-file.xml') # download the file tree = etree.parse("Sample-employee-XML-file.xml") root = tree.getroot() columns = ["firstname", "lastname", "title", "division", "building","room"] datatframe = pd.DataFrame(columns = columns) for node in root: firstname = node.find("firstname").text lastname = node.find("lastname").text title = node.find("title").text division = node.find("division").text building = node.find("building").text room = node.find("room").text datatframe = datatframe.append(pd.Series([firstname, lastname, title, division, building, room], index = columns), ignore_index = True) datatframe<jupyter_output><empty_output><jupyter_text>## Save Data<jupyter_code># using to.csv # dataframe.to_csv('filename.csv', index = False)<jupyter_output><empty_output><jupyter_text># Binary File Format## Reading image file<jupyter_code>from PIL import Image import urllib.request urllib.request.urlretrieve("https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg", "dog.jpg") # Read image img = Image.open('dog.jpg') display(img)<jupyter_output><empty_output><jupyter_text># Data Analysis<jupyter_code>url = 'https://raw.githubusercontent.com/sonpn82/Python-for-data-science/master/diabetes.csv' df = pd.read_csv(url) df.head() df.shape<jupyter_output><empty_output><jupyter_text>## Statistical view of Dataset<jupyter_code>df.info() df.describe()<jupyter_output><empty_output><jupyter_text>## Check missing values<jupyter_code>missing_data = df.isnull() missing_data.head() # True mean missing value<jupyter_output><empty_output><jupyter_text>## Count missing values<jupyter_code>for column in missing_data.columns.values.tolist(): print(column) print(missing_data[column].value_counts()) print('')<jupyter_output>Pregnancies False 768 Name: Pregnancies, dtype: int64 Glucose False 768 Name: Glucose, dtype: int64 BloodPressure False 768 Name: BloodPressure, dtype: int64 SkinThickness False 768 Name: SkinThickness, dtype: int64 Insulin False 768 Name: Insulin, dtype: int64 BMI False 768 Name: BMI, dtype: int64 DiabetesPedigreeFunction False 768 Name: DiabetesPedigreeFunction, dtype: int64 Age False 768 Name: Age, dtype: int64 Outcome False 768 Name: Outcome, dtype: int64 <jupyter_text>## Correct dataf format<jupyter_code>df.dtypes<jupyter_output><empty_output><jupyter_text>## Visualization<jupyter_code># import libraries import matplotlib.pyplot as plt import seaborn as sns labels = 'Diabetic','Not Diabetic' plt.pie(df['Outcome'].value_counts(),labels=labels,autopct='%0.02f%%') plt.legend() plt.show()<jupyter_output><empty_output>
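A small portability note on the XML cell above: it grows the DataFrame row by row with `DataFrame.append`, which was deprecated and then removed in pandas 2.0. A sketch of the same parse that collects the rows first and builds the frame once, assuming the same Sample-employee-XML-file.xml downloaded above, looks like this:

```python
import pandas as pd
import xml.etree.ElementTree as etree

columns = ["firstname", "lastname", "title", "division", "building", "room"]

tree = etree.parse("Sample-employee-XML-file.xml")
rows = []
for node in tree.getroot():
    # One dict per employee element; assumes every tag in `columns` is present,
    # as it is in the sample file used above.
    rows.append({col: node.find(col).text for col in columns})

dataframe = pd.DataFrame(rows, columns=columns)   # built once, no repeated append
print(dataframe.head())
```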
no_license
/5.5 Working with different file formats.ipynb
sonpn82/Python-for-data-science
18
<jupyter_start><jupyter_text>## Visualizing Activation Functions - Lab ## Introduction Now that you've built your own CNN and seen how to visualize feature maps, it's time to practice loading a pretrained model from file and visualizing the learned features systematically. In this lab, you'll expand upon the code from the previous lesson in order to succinctly visualize all the channels from each layer in a CNN. ## Objectives You will be able to: * Load a saved model * Visualize the filters produced by hidden layers in a CNN## Load a Model For this lab, load the saved model **chest_xray_all_with_augmentation_data.h5**. This saved file includes both the model architecture and the trained weights. See the `model.save()` method for further details. The model was built in order to help identify patients with pneumonia. Start simply by loading the model and pulling up a summary of the layers. (To load the model use the `keras.models.load_model` method.) <jupyter_code>from keras.models import load_model model = load_model('chest_xray_all_with_augmentation_data.h5') model.summary()<jupyter_output>Using TensorFlow backend. <jupyter_text>## Load an Image Before you plot the learned representations of the convolutional base, let's import an image and display it prior to processing. This will provide a comparison to the transformations formed by the model's feature maps. Load and display the image **person3_virus_16.jpeg**.<jupyter_code># Your code here<jupyter_output><empty_output><jupyter_text>## Transform the Image to a Tensor and Visualize Again Recall that you should always preprocess your images into tensors when using deep learning. As such, preprocess this image and then redisplay the tensor.<jupyter_code># Your code here<jupyter_output><empty_output><jupyter_text>## Plot Feature Maps Now that you've loaded a model, practice visualizing each of the channels for each of the feature maps of the convolutional layers. Recall that this process will take a few steps. First, extract the feature maps, or layer outputs, from each of the activation functions in the model. Then generate models that transform the image from its raw state to these feature maps. From there, you can take these transformations and visualize each channel for each feature map. To preview the results of the solution code, take a sneak peek at the Intermediate_Activations_Visualized.pdf file.<jupyter_code># Your code here<jupyter_output><empty_output>
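Since the solution cells above are left as placeholders, a minimal sketch of the workflow the lab describes is shown below. It is not the lab's official solution: it assumes the saved model and person3_virus_16.jpeg are in the working directory and that the network takes 150x150 RGB inputs scaled to [0, 1] (check `model.input_shape` and adjust if the real model differs).

```python
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model, load_model
from keras.preprocessing import image

model = load_model('chest_xray_all_with_augmentation_data.h5')

# Load and preprocess the image into a batch-of-one tensor.
img = image.load_img('person3_virus_16.jpeg', target_size=(150, 150))
tensor = np.expand_dims(image.img_to_array(img) / 255., axis=0)   # shape (1, 150, 150, 3)

# Build a model that returns the feature maps of the convolutional base.
conv_layers = [l for l in model.layers if 'conv' in l.name or 'pool' in l.name]
activation_model = Model(inputs=model.input, outputs=[l.output for l in conv_layers])
activations = activation_model.predict(tensor)

# Plot the first few channels of each feature map.
for layer, act in zip(conv_layers, activations):
    n_channels = min(act.shape[-1], 8)
    fig, axes = plt.subplots(1, n_channels, figsize=(2 * n_channels, 2))
    for ch, ax in enumerate(np.atleast_1d(axes)):
        ax.imshow(act[0, :, :, ch], cmap='viridis')
        ax.axis('off')
    fig.suptitle(layer.name)
plt.show()
```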
non_permissive
/index.ipynb
swede0623/dsc-visualizing-activation-functions-lab-online-ds-pt-031119
4
<jupyter_start><jupyter_text># Assignment #1: Python Programming### Name: ### ID:First assignment for the Artificial Intelligence course. It must be submitted before next Thursday, October 31, by e-mail to the address [email protected]. The file must be named `Tarea_1(00000000)`, where the $00000000$ is replaced by your ID number. In the cell above this one, write your full name and your ID number.Answers must be placed in the code cells that say ```python # Respuesta: ``` except for the first two questions, where you will implement the function directly in the cell containing its definition, replacing `pass` with the corresponding implementation.1. Write a function `suma_de_cuadrados` that computes the sum of squares up to $n$<jupyter_code>def suma_de_cuadrados(n): """Retorna la suma (1 + 2^2 + 3^2 + ... + n^2) Precondición: n > 0, type(n) == int >>> suma_de_cuadrados(3) 14 >>> suma_de_cuadrados(1) 1 """ pass<jupyter_output><empty_output><jupyter_text>2. Write a function `largo_palabras` that takes a sentence (string), computes the length of each word in that sentence, and returns the lengths in a list. You may assume that words are always separated by a single space character " ". You may use Python's `str.split` function<jupyter_code>help(str.split) def largo_palabras(sentencia): """Retorna una lista conteniendo el largo de cada palabra en la sentencia. >>> largo_palabras("bienvenidos a Inteligencia Artificial!") [11, 1, 12, 11] >>> largo_palabras("aprendizaje automático esta de moda") [11, 10, 4, 2, 4] """ pass<jupyter_output><empty_output><jupyter_text>3. Consider the function `extraer_and_aplicar(l, p, f)` shown below, which extracts the elements of a list $l$ that satisfy a boolean predicate $p$, applies a function $f$ to each of them, and returns the result.<jupyter_code>def extraer_and_aplicar(l, p, f): resultado = [] for x in l: if p(x): resultado.append(f(x)) return resultado<jupyter_output><empty_output><jupyter_text>Rewrite `extraer_and_aplicar(l, p, f)` in one line using a list comprehension<jupyter_code># Respuesta:<jupyter_output><empty_output><jupyter_text>4. Write a function `concatenar(secuencias)` that returns a list containing the concatenation of the elements of the input sequences. Your implementation must consist of a single list comprehension and must not exceed one line. ```python concatenar([[1, 2], [3, 4]]) [1, 2, 3, 4] concatenar(["abc", (0, [0])]) ['a', 'b', 'c', 0, [0]] ```<jupyter_code># Respuesta:<jupyter_output><empty_output><jupyter_text>5. Write a function `todos_menos_ultimo(secuencia)` that returns a new sequence containing all but the last element of the input sequence. If the input sequence is empty, a new empty sequence of the same type must be returned. ```python >>> todos_menos_ultimo("abc") 'ab' >>> todos_menos_ultimo((1, 2, 3)) (1, 2) ``````python >>> todos_menos_ultimo("") '' >>> todos_menos_ultimo([]) [] ```<jupyter_code># Respuesta: <jupyter_output><empty_output><jupyter_text>6. Write a function `cada_otro(secuencia)` that returns a new sequence containing every other element of the input sequence, starting with the first.```python >>> cada_otro([1, 2, 3, 4, 5]) [1, 3, 5] >>> cada_otro("abcde") 'ace' ``````python >>> cada_otro([1, 2, 3, 4, 5, 6]) [1, 3, 5] >>> cada_otro("abcdef") 'ace' ``` <jupyter_code># Respuesta:<jupyter_output><empty_output><jupyter_text>7 and 8. The prefixes of a sequence include the empty sequence, the first element, the first two elements, and so on, up to and including the complete sequence. Likewise, the suffixes of a sequence include the empty sequence, the last element, the last two elements, and so on, up to and including the complete sequence. Write a pair of generator functions `prefijos(secuencia)` and `sufijos(secuencia)` that yield all the prefixes and suffixes of the input sequence.```python >>> list(prefijos([1, 2, 3])) [[], [1], [1, 2], [1, 2, 3]] >>> list(sufijos([1, 2, 3])) [[1, 2, 3], [2, 3], [3], []] ``````python >>> list(prefixes("abc")) ['', 'a', 'ab', 'abc'] >>> list(suffixes("abc")) ['abc', 'bc', 'c', ''] ```<jupyter_code># Respuesta función prefijos: # Respuesta función sufijos:<jupyter_output><empty_output><jupyter_text>9. Write a function `no_vocales(texto)` that removes all vowels from the input string and returns the result.```python >>> no_vocales("Esto Es Un Ejemplo.") 'st s n jmlo.' ``````python >>> no_vocales("Nosotros amamos a Python!") 'Nstrs mm Pythn!' ```10. Write a function `digitos_a_palabras(texto)` that extracts all the digits from the input string, writes them out as lowercase Spanish words, and returns a new string in which each one is separated by a single space. If the input string contains no digits, an empty string must be returned```python >>> digitos_a_palabras("Código Postal: 39104") 'tres nueve uno cero cuatro' ``````python >>> digitos_a_palabras("Pi es 3.1415...") 'tres uno cuatro uno cinco' ```<jupyter_code># Respuesta:<jupyter_output><empty_output>
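For reference, here is a minimal sketch of possible answers to several of the exercises above. These are illustrative solutions that match the behaviour shown in the doctests, not the official answer key.

```python
# Illustrative solutions for exercises 1, 2, 3, 5, 6, 7 and 8 above.
def suma_de_cuadrados(n):
    return sum(i ** 2 for i in range(1, n + 1))          # 1 + 2^2 + ... + n^2

def largo_palabras(sentencia):
    return [len(palabra) for palabra in sentencia.split(" ")]

def extraer_and_aplicar(l, p, f):                        # exercise 3, one line
    return [f(x) for x in l if p(x)]

def todos_menos_ultimo(secuencia):                       # exercise 5
    return secuencia[:-1]                                # slicing preserves the type

def cada_otro(secuencia):                                # exercise 6
    return secuencia[::2]

def prefijos(secuencia):                                 # exercise 7
    for i in range(len(secuencia) + 1):
        yield secuencia[:i]

def sufijos(secuencia):                                  # exercise 8
    for i in range(len(secuencia) + 1):
        yield secuencia[i:]
```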
no_license
/Tareas/Tarea_1(00000000).ipynb
GaReL254/InteligenciaArtificial2019
9
<jupyter_start><jupyter_text># Character Recognition using Multilayer Perceptron (MLP) MLP เป็นโครงข่าย (Network) ประเภทหนึ่งของโครงข่ายประสาทเทียมแบบแพร่ไปข้างหน้า (Feedforward Neural Network) ![MLP](https://res.mdpi.com/information/information-03-00756/article_deploy/html/images/information-03-00756-g002.png) MLP จะประกอบด้วยจำนวนชั้น (Layer) จำนวนทั้งสิ้น 3 ชั้น คือ * ชั้นนำเข้า (Input Layer) * ชั้นซ่อน (Hidden Layer) และ * ชั้นแสดงผลลัพธ์ (Output Layer) ## Layer * **Input Layer** (*z1 - zm*) เป็นชั้นที่จำนวนโหนด (Node) จะถูกกำหนดให้มีจำนวนเท่ากับแอททริบิวต์ (Attribute) ของข้อมูล ยกตัวอย่างเช่น หากข้อมูล 1 แถว (Record) มีจำนวนทั้งสิ้น 6 คอลัมน์ (Attribute) ดังนั้นใน Input Layer จะกำหนดให้มีจำนวน 6 โหนด * **Hidden Layer** (*x1 - xn*) เป็นชั้นที่สามารถกำหนดจำนวนของโหนดได้ตามความต้องการ ทั้งนี้ขึ้นอยู่กับการทดลอง โดย Hidden Layer สามารถกำหนดให้มีได้มากกว่า 1 ชั้น * **Output Layer** (*y1 - ym*) จะถูกกำหนดให้มีจำนวนโหนด (Node) เท่ากับจำนวนของคลาส (Class) หรือกลุ่ม เช่นตัวเลข 0-9 มีทั้งสิ้น 10 กลุ่ม และตัวอักษรภาษาอังกฤษ a-z มีทั้งหมด 26 กลุ่ม ## Weight * การเชื่อมโยงระหว่างโหนดในแต่ละชั้น (Layer) ได้แก่ *Vnm* และ *Wkn* เรียกว่า **Weight** หรือค่าน้ำหนัก ซึ่งได้มาจากการคำนวณ ดังนั้นในการคำนวณแต่ละรอบ (Epoch) ค่า weight จะเปลี่ยนไปตามการคำนวณจากข้อมูลที่วิ่งเข้ามาและค่า weight เดิมใช้ Keras เป็น tool ในการเขียนโปรแกรม โดยรันอยู่บน TensorFlow<jupyter_code>import keras<jupyter_output><empty_output><jupyter_text>## โหลดข้อมูล MNIST โหลดข้อมูล MNIST มาใช้งาน โดยข้อมูลแบ่งออกเป็น 4 ส่วนประกอบด้วย x_train หมายถึง ข้อมูลชุดเรียนรู้ ซึ่งเป็น pixel ของตัวเลข y_train หมายถึง Label ของข้อมูลชุดเรียนรู้ ซึ่งมีค่าตั้งแต่ 0-9 x_test หมายถึง ข้อมูลชุดทดสอบ y_test หมายถึง Label ของข้อมูลชุดทดสอบ <jupyter_code>from keras.datasets import mnist #keras will automatically download the dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() (xx_train, yy_train), (xx_test, yy_test) = mnist.load_data()<jupyter_output><empty_output><jupyter_text>## แสดงตัวอย่างข้อมูล MNIST<jupyter_code>import matplotlib.pyplot as plt import random #get the size of the dataset print('type of training data', type(x_train)) print('size of training data', x_train.shape) print('size of test data', x_test.shape) #set the seed so our random tests are reproducible random.seed(49) train_size = len(x_train) r1 = random.randint(0, train_size-1) r2 = random.randint(0, train_size-1) r3 = random.randint(0, train_size-1) r4 = random.randint(0, train_size-1) # แสดงข้อมูลในส่วนของ training data plt.subplot(221) plt.imshow(x_train[r1], cmap=plt.get_cmap('gray')) plt.title(y_train[r1]) plt.subplot(222) plt.imshow(x_train[r2], cmap=plt.get_cmap('gray')) plt.title(y_train[r2]) plt.subplot(223) plt.imshow(x_train[r3], cmap=plt.get_cmap('gray')) plt.title(y_train[r3]) plt.subplot(224) plt.imshow(x_train[r4], cmap=plt.get_cmap('gray')) plt.title(y_train[r4]) # show the plot plt.show() # แสดงข้อมูลในส่วนของ test data test_size = len(x_test) random.seed(49) r1 = random.randint(0, test_size-1) r2 = random.randint(0, test_size-1) r3 = random.randint(0, test_size-1) r4 = random.randint(0, test_size-1) # แสดงข้อมูลในส่วนของ training data plt.subplot(221) plt.imshow(x_test[r1], cmap=plt.get_cmap('gray')) plt.title(y_test[r1]) plt.subplot(222) plt.imshow(x_test[r2], cmap=plt.get_cmap('gray')) plt.title(y_test[r2]) plt.subplot(223) plt.imshow(x_test[r3], cmap=plt.get_cmap('gray')) plt.title(y_test[r3]) plt.subplot(224) plt.imshow(x_test[r4], cmap=plt.get_cmap('gray')) plt.title(y_test[r4]) # show the plot 
plt.show()<jupyter_output><empty_output><jupyter_text>## ปรับเปลี่ยนรูปแบบของข้อมูล (Reshape) ปรับเปลี่ยนรูปแบบของข้อมูลจาก 28x28 ให้เป็น 784<jupyter_code>num_pixels = x_train.shape[1] * x_train.shape[2] print('Before reshape') print('shape of x_train', x_train.shape) print('number of pixel', num_pixels) #reshape each data point into a single row vector of 784 pixel values x_train = x_train.reshape(x_train.shape[0], num_pixels) / 255 x_test = x_test.reshape(x_test.shape[0], num_pixels) / 255 print('After reshape') print('shape of x_train', x_train.shape) print('shape of x_train', x_train.shape)<jupyter_output>Before reshape shape of x_train (60000, 28, 28) number of pixel 784 After reshape shape of x_train (60000, 784) shape of x_train (60000, 784) <jupyter_text>## เปลี่ยนรูปแบบของ Label ปรับเปลี่ยนรูปแบบ Label ทั้งในส่วนของ train และ test<jupyter_code>print('Before') print(y_train[0]) print(y_test[0]) from keras.utils import np_utils y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) print('After') print(y_train[0]) print(y_test[0])<jupyter_output>After [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] <jupyter_text>แสดงขนาดของ y_train และตัวอย่างข้อมูลจำนวน 10 รายการ<jupyter_code>print('shape of y_train', y_train.shape) print(y_train[0:10])<jupyter_output>shape of y_train (60000, 10) [[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]] <jupyter_text># Multilayer Perceptron (MLP)โหลด Library ของ Keras เพื่อใช้สำหรับสร้าง MLP<jupyter_code>from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import Activation<jupyter_output><empty_output><jupyter_text>### กำหนดจำนวนของ Output Class<jupyter_code>#after converting to one-hot format we can get the num classes from the shape #y_train = np_utils.to_categorical(y_train) #y_test = np_utils.to_categorical(y_test) num_classes = y_train.shape[1]<jupyter_output><empty_output><jupyter_text>## สร้างโมเดล (Model) ของ MLP<jupyter_code>model = Sequential() model.add(Dense(50, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(1000)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(num_classes)) model.add(Activation('softmax'))<jupyter_output><empty_output><jupyter_text>## อธิบายวิธีการสร้าง Model ``` model = Sequential() ``` * กำหนดให้ input มีจำนวน 784 โหนด * จำนวนของ batch_size มีค่าเท่ากับ 50 หมายถึงในการคำนวณแต่ละครั้ง ข้อมูลจะถูกส่งเข้าไปคำนวณจำนวนครั้งละ 50 ชุด ``` model.add(Dense(50, input_shape=(784,))) ``` * กำหนด activation function ให้เป็นแบบ ReLU (rectified linear unit) ``` model.add(Activation('relu')) ``` * กำหนดให้ dropout = 0.2 ซึ่งหมายถึงการลดจำนวนของโหนดที่ใช้ในการคำนวณ ``` model.add(Dropout(0.2)) ``` * กำหนดให้ชั้นซ่อนชั้นแรกมีจำนวน 1000 โหนด ``` model.add(Dense(1000)) ``` ``` model.add(Activation('relu')) model.add(Dropout(0.2)) ``` * กำหนดให้ชั้นแสดงผลลัพธ์มีจำนวนโหนดเท่ากับ **num_classes** ซึ่งในกรณีนี้คือ 10 class ``` model.add(Dense(num_classes)) ``` * ใช้ฟังก์ชัน softmax เพื่อคำนวณหาคำตอบ โดยผลลัพธ์ที่ได้จะแสดงในรูปแบบของความน่าจะเป็น (probability) โดยค่าที่มากที่สุดคือคำตอบ ``` model.add(Activation('softmax')) ```## คอมไพล์ (Compile) โมเดล MLP ที่สร้างขึ้น ``` 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) ``` โดยที่ * **loss function** ถูกกำหนดให้เป็น `loss='categorical_crossentropy'`` โดยที่ *categorical_crossentropy* จะถูกนำไปใช้กับ output ที่มีมากกว่า 2 กลุ่ม หากมีจำนวน 2 กลุ่มสามารถกำหนดให้เป็น `loss='binary_crossentropy'` * **optimizer** ถูกกำหนดให้ใช้ **adam optimization** ใช้สำหรับการปรับปรุงค่า Weight ในแต่ละรอบ (update network weights iterative) <jupyter_code>model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])<jupyter_output><empty_output><jupyter_text>## การเรียนรู้ (Training) โมเดล MLP โดยใช้คำสั่ง fit * สามารถเรียนรู้โมเดลโดยใช้คำสั่ง fit ดังต่อไปนี้ ``` model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=200, verbose=2) ``` **โดยที่** * **validation_split** หมายถึงการแบ่งข้อมูลออกมาเพื่อใช้ในการทดสอบโมเดล โดยที่ 0.1 หมายถึง 10% * **epochs** หมายถึงจำนวนรอบที่ใช้ในการเรียนรู้ * **batch_size** หมายถึงจำนวนของรูปภาพที่ส่งเข้าไปคำนวณในแต่ละครั้ง * **verbose** หมายถึงการแสดงผลลัพธ์ของการคำนวณ <jupyter_code># Fit and Evaluate model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=200, verbose=2)<jupyter_output>WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. Train on 54000 samples, validate on 6000 samples Epoch 1/10 - 2s - loss: 0.5087 - acc: 0.8450 - val_loss: 0.1673 - val_acc: 0.9522 Epoch 2/10 - 2s - loss: 0.2393 - acc: 0.9284 - val_loss: 0.1238 - val_acc: 0.9647 Epoch 3/10 - 2s - loss: 0.1910 - acc: 0.9419 - val_loss: 0.1026 - val_acc: 0.9708 Epoch 4/10 - 2s - loss: 0.1618 - acc: 0.9500 - val_loss: 0.0958 - val_acc: 0.9720 Epoch 5/10 - 2s - loss: 0.1463 - acc: 0.9548 - val_loss: 0.0882 - val_acc: 0.9728 Epoch 6/10 - 2s - loss: 0.1340 - acc: 0.9581 - val_loss: 0.0838 - val_acc: 0.9747 Epoch 7/10 - 2s - loss: 0.1226 - acc: 0.9609 - val_loss: 0.0842 - val_acc: 0.9748 Epoch 8/10 - 2s - loss: 0.1121 - acc: 0.9645 - val_loss: 0.0827 - val_acc: 0.9752 Epoch 9/10 - 2s - loss: 0.1113 - [...]<jupyter_text>## ทดสอบประสิทธิภาพของโมเดล ด้วยคำสั่ง evaluate<jupyter_code># Evaluate on the test data scores = model.evaluate(x_test, y_test, verbose=0) print("Baseline Accuracy: %.2f%%" % (scores[1]*100))<jupyter_output>Baseline Accuracy: 97.41% <jupyter_text>* จากคำตอบ **Baseline Accuracy: 97.41%** ผลลัพธ์ที่ได้จากการทดสอบด้วยชุดทดสอบ (Test set) พบว่าโมเดลที่สร้างมีความถูกต้อง 97.41% * หากต้องการความถูกต้องที่เพิ่มสูงขึ้นอาจต้องปรับเปลี่ยนค่าพารามิเตอร์ (Parameter) เช่น จำนวนรอบในการเรียนรู้ หรือปรับเปลี่ยนโมเดล เช่นจำนวนของโหนดในชั้นซ่อน หรือเพิ่มจำนวนชั้นซ่อน เป็นต้น## ทดสอบการรู้จำตัวเลขจากชุดข้อมูล MNIST<jupyter_code>from keras.datasets import mnist #keras will automatically downlhoad the dataset (xx_train, yy_train), (xx_test, yy_test) = mnist.load_data() # predict the class for i in range(0,3): r_num = random.randint(0, train_size-1) plt.imshow(xx_train[r_num]) plt.show() output = model.predict_classes([[x_train[r_num]]]) print("predict class is ", output) <jupyter_output><empty_output>
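The explanation above notes that the softmax output is a probability vector whose largest entry is the predicted class. A small sketch of how one might turn those probabilities into digit predictions on the test set, reusing the `model`, `x_test`, and `yy_test` objects defined in the notebook, is shown below; the printed values are illustrative.

```python
# Sketch: mapping softmax probabilities to predicted digits on the test set.
import numpy as np

probs = model.predict(x_test[:5])            # shape (5, 10); each row sums to ~1
predicted_digits = np.argmax(probs, axis=1)  # the highest-probability class wins
print(predicted_digits)                      # predicted digits for the first five test images
print(yy_test[:5])                           # original (non one-hot) labels for comparison
```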
no_license
/MNSIT_MLP.ipynb
mrolarik/deep-learning
13
<jupyter_start><jupyter_text># 데이터 살펴보기<jupyter_code>import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd import os print(os.getcwd()) train = pd.read_csv("data/train.csv") test = pd.read_csv("data/test.csv") sub = pd.read_csv("data/sample_submission.csv") age = pd.read_csv("data/age_gender_info.csv") train.shape, test.shape, sub.shape, age.shape train.info() test.info()<jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 1022 entries, 0 to 1021 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 단지코드 1022 non-null object 1 총세대수 1022 non-null int64 2 임대건물구분 1022 non-null object 3 지역 1022 non-null object 4 공급유형 1022 non-null object 5 전용면적 1022 non-null float64 6 전용면적별세대수 1022 non-null int64 7 공가수 1022 non-null float64 8 자격유형 1020 non-null object 9 임대보증금 842 non-null object 10 임대료 842 non-null object 11 도보 10분거리 내 지하철역 수(환승노선 수 반영) 980 non-null float64 12 도보 10분거리 내 버스정류장 수 1022 non-null float64 13 단지내[...]<jupyter_text># 결측치가 얼마나 될까?<jupyter_code>### 결측치를 확인 train.isna().sum() test.isna().sum()<jupyter_output><empty_output><jupyter_text>* 임대보증금, 임대료, 지하철역수, 버스정류장수(train only), 자격유형(test only)<jupyter_code>train.head() train.columns test.head() train['자격유형'].value_counts() mapping = { 'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8, 'I':9, 'J':10, 'K':11, 'L':12, 'M':13, 'N':14, 'O':15 } train['자격유형']=train['자격유형'].map(mapping).astype(int) test.loc[test['자격유형'].isnull()] grouped = test.groupby(['단지코드','임대건물구분','지역','공급유형']) group1 = grouped.get_group(('C2411','아파트','경상남도','국민임대')) group1 # 자격유형이 A일것으로 판단 grouped = test.groupby(['단지코드','임대건물구분','지역','공급유형']) group2 = grouped.get_group(('C2253','아파트','강원도','영구임대')) group2 # 자격유형이 C일것으로 판단 test.loc[196,"자격유형"]='A' test.loc[258,'자격유형']='C' mapping = { 'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8, 'I':9, 'J':10, 'K':11, 'L':12, 'M':13, 'N':14, 'O':15 } test['자격유형']=test['자격유형'].map(mapping).astype(int) <jupyter_output><empty_output><jupyter_text>## Lasso 모델<jupyter_code>#train dataset의 train과 test로 나누기 from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso, Ridge sel = ['총세대수', '전용면적', '전용면적별세대수', '공가수', '단지내주차면수', '자격유형'] X = train[sel] y = train['등록차량수'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) X_train.shape, X_test.shape, y_train.shape, y_test.shape from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_squared_log_error #평가해보기 ''' MAE(mean absolute error) : 각각의 값에 절대값을 취해서 이를 전부 더한 후, 갯수로 나누어주기 MSE(mean squared error) : (실제값 - 예측값 ) **2 전부 더해서 개수로나누어 준다 RMSE(root mean squared error) : MSE에 루트 씌워준다 ''' alpha = [0.0001,0.001,0.005,0.01,0.05,0.1,0.5,1] for i in alpha: model = Lasso(alpha=i).fit(X_train, y_train) #학습 pred = model.predict(X_test) #예측 pred MAE = np.mean(abs(y_test - pred)) MSE = mean_squared_error(y_test, pred) RMSE = mean_squared_error(y_test, pred) ** 0.5 print("alpha:",i, "MAE:",MAE, "MSE:",MSE, "RMSE:",RMSE) #결정계수 구하기 print("학습(score):", model.score(X_train,y_train)) #결정계수 print("테스트(score):", model.score(X_test,y_test)) #train dataset의 train과 test로 나누기 from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso, Ridge sel = ['총세대수', '전용면적', '전용면적별세대수', '공가수', '단지내주차면수', '자격유형'] X_train = train[sel] X_test = test[sel] y_train = train['등록차량수'] # 모델을 만들고 예측 model = Lasso(alpha=0.5) #학습 model.fit(X_train, y_train) pred = model.predict(X_test) #예측 pred test['등록차량수']=pred 
test['단지별차량수평균']=test.groupby('단지코드')['등록차량수'].transform(np.mean) test_new = test.drop_duplicates(['단지코드'],keep='first').reset_index() sub_df = test_new[['단지코드','단지별차량수평균']] sub_df.columns =['code','num'] sub_df.to_csv('second_0715.csv',index=False) <jupyter_output><empty_output>
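The notebook imports `Ridge` alongside `Lasso` but only ever fits Lasso. For completeness, here is a short sketch of the same alpha sweep with Ridge, re-splitting the training data so the features and target stay consistent; it reuses the notebook's `train` dataframe and feature list and is only illustrative.

```python
# Sketch: Ridge sweep over the notebook's alpha grid for comparison with Lasso.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error

sel = ['총세대수', '전용면적', '전용면적별세대수', '공가수', '단지내주차면수', '자격유형']
Xr_train, Xr_test, yr_train, yr_test = train_test_split(
    train[sel], train['등록차량수'], random_state=0)

for alpha in [0.0001, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]:
    ridge = Ridge(alpha=alpha).fit(Xr_train, yr_train)
    rmse = mean_squared_error(yr_test, ridge.predict(Xr_test)) ** 0.5
    print("alpha:", alpha, "RMSE:", rmse, "R2:", ridge.score(Xr_test, yr_test))
```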
no_license
/04_lasso_model.ipynb
jinijini-jinilamp/dacon_parking_demand_competition
4
<jupyter_start><jupyter_text># Segundo parcial ## Juan David Salcedo Hernández. 1001577699A) Los siguientes datos fueron extraidos de un [perfil real de Instragram](https://www.instagram.com/paulacano24/): https://raw.githubusercontent.com/restrepo/ComputationalMethods/master/data/Followers.json Interprete los datos como un problema de caida libre y a partir del ajuste (fit) a la correspondiente ecuación de movimiento, determine 1. La velocidad inicial de incremento de seguidores en `followers/hour` 1. La aceleración constante en `followers/hour**2` 1. El número máximo de seguidores que alcanzaría el perfil 1. La fecha en la que se alcanzaría dicho máximo <jupyter_code># Usual imports and graphics configs %config InlineBackend.figure_format = 'svg' import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.integrate as integrate import scipy.optimize as optimize # First problem # We begin by reading the dataframe path = 'https://raw.githubusercontent.com/restrepo/ComputationalMethods/master/data/Followers.json' df = pd.read_json(path) df.head() import inspect # the following functions are generalised to find fit parameters and maximum values of functions def model_fit(function, _t, _x): # perform the optimisation process and populate the array of predictions popt, pcov = optimize.curve_fit(function, _t, _x) predictions = function(_t, *popt) # define an array of parameter names param_names = [i for i in inspect.signature(function).parameters.keys()] param_names = np.array(param_names) # make a dictionary with parameter names and their corresponding values params = {A: B for A, B in zip(param_names[1:], popt)} return [params, predictions] def model_max(function, args, x0=0): def _function(x, *params): return -function(x, *params) # perform the optimisation process opt_result = optimize.fmin_powell(_function, x0=x0, args=args, disp=0, full_output=True) max_point = opt_result[0] max_val = opt_result[1] return [float(max_point), -float(max_val)] # since we're interpreting this data as a free-fall problem, we may use the formula # x = x_ini + v_ini*t - 0.5*a*t**2, where x is the number of followers. 
def _free_fall(t, x_ini, v_ini, a): return x_ini + v_ini*t - 0.5*a*t**2 # define the optimal params params = model_fit(_free_fall, df.hours, df.followers)[0] # define predictions and actual values predictions = model_fit(_free_fall, df.hours, df.followers)[1] actual_vals = df.followers _t = df.hours # plot the graph of the function plt.plot(_t, actual_vals, 'b.', label='Observations') plt.plot(_t, predictions, 'k-', label='Model') plt.xlabel('Hours') plt.ylabel('Followers') plt.legend() plt.grid() plt.plot() # these are the optimal parameters # notice that the acceleration is intrinsically negative pd.DataFrame.from_dict(params, orient='index', columns=['Value']) # find the maximum point and maximum value param_values = tuple(params.values()) x0 = _t[len(_t)-1] [max_time, max_followers] = model_max(_free_fall, param_values, x0=x0) import datetime # convert the amount of hours that need to elaps into a date, and # add them to the initial time max_date = df['date_time'][0] + datetime.timedelta(hours = max_time) print(f'The amount of followers will reach a maximum of {max_followers} at {max_date}, after {max_time} hours')<jupyter_output>The amount of followers will reach a maximum of 50262.13655441515 at 2021-10-17 00:18:28.779618, after 1754.1930865606619 hours <jupyter_text>B) El período de un pendulo de longitud $l$ oscilando a ángulos grandes $\alpha$, está dado por $$ T=T_{0} \frac{\sqrt{2}}{\pi} \int_{0}^{\alpha} \frac{d \theta}{(\cos \theta-\cos \alpha)^{\frac{1}{2}}} $$ donde $$ T_{0}=2 \pi \sqrt{\frac{l}{g}} $$ es el período del mismo péndulo a pequeñas amplitudes. Culquier intento de evaluación numérico de esa integral debería fallar. Sin embargo, si hacemos el cambio de variable $$ \sin \frac{\theta}{2}=\sin \frac{\alpha}{2} \sin \phi $$ obtenemos $$ T=\frac{2 T_{0}}{\pi} \int_{0}^{\frac{\pi}{2}} \frac{d \phi}{\left(1-\sin ^{2} \frac{\alpha}{2} \sin ^{2} \phi\right)^{\frac{1}{2}}} $$ que corresponde a una integral bien comportada. 1. Escriba un programa que use la anterio integral para calcular el cociente $T/T_0$ para ángulos enteros (en grados) $0^\circ \le\alpha \le 90^\circ$. 1. Genere un DataFrame de pandas que contenga como columnas: la amplitud en grados, la amplitud en radianes, y el cociente $T/T_0$, 1. Haga un gráfico de $T/T_0$ como función de $\alpha$ (en grados). 1. Explain the result when $\alpha = 0.$ <jupyter_code># Second problem def integrand(phi, alpha): return 1/np.sqrt(1-np.sin(0.5*alpha)**2 * np.sin(phi)**2) def graph(alpha): alpha = np.deg2rad(alpha) expression = lambda phi: integrand(phi, alpha) value = (2/np.pi)*integrate.quad(expression, 0, np.pi/2)[0] return value # populate the datafarme df = pd.DataFrame({'degrees': np.linspace(0,90)}) df['radians'] = df.degrees.apply(np.deg2rad) df['T/T0'] = df.degrees.apply(graph) df.head() plt.plot(df.degrees, df['T/T0'], 'k.') plt.xlabel('Angle / Degrees$\,^\circ$') plt.ylabel('Ratio $T/T_0$') plt.grid() plt.show()<jupyter_output><empty_output>
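The last sub-question of problem B asks to explain the result at $\alpha = 0$. A short check reusing the `graph` function defined above makes the small-angle limit explicit: at $\alpha = 0$ the integrand reduces to 1, the integral equals $\pi/2$, and therefore $T/T_0 = 1$, i.e. the pendulum recovers the simple small-amplitude period.

```python
# Quick check of the alpha -> 0 limit using the graph() helper defined above:
# the integrand becomes 1, the integral is pi/2, and T/T0 = (2/pi)*(pi/2) = 1.
print(graph(0))    # expected: 1.0 (small-angle limit)
print(graph(90))   # the ratio grows with amplitude, roughly 1.18 at 90 degrees
```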
no_license
/1001577699/Copy_of_Examen_2021_1_02_enunciado.ipynb
CatalinaEspejo/Evaluacion_2021-1
2
<jupyter_start><jupyter_text><jupyter_code>from google.colab import drive drive.mount('/content/drive/') from google.colab import drive drive.mount('/content/drive') import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import os !pip install "tensorflow>=1.15,<2.0" import os os.chdir('drive/My Drive/Colab Notebooks/') data_directory = 'data/' data = [] for file in os.listdir(data_directory): if(str(file).split('.')[1] == 'csv'): reader = pd.read_csv(os.path.join(data_directory,file)) reader = reader.dropna() data.append(reader.iloc[:,1:5].values.astype('float32')) sz = [] sz.append(len(data[0])) dat= data[0] for i in range(1,len(data)): sz.append(len(data[i])) dat = np.vstack((dat,data[i])) print(dat.shape, sz[i]) from sklearn.preprocessing import MinMaxScaler sc= MinMaxScaler(feature_range=(0,1)) dat[:] = sc.fit_transform(dat[:]) final_data = [] koto = 0 for i in sz: final_data.append(dat[koto: koto + i]) koto += i look_back = 7 data_x = [] data_y = [] for i in final_data: for j in range(len(i) - look_back): data_x.append(i[j: j + look_back]) data_y.append(i[j + look_back]) data_x = np.array(data_x) data_y = np.array(data_y) x_train = data_x[0:500] x_test = data_x[500:] y_train = data_y[0:500] y_test = data_y[500:] from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM model = Sequential() model.add(LSTM(50, input_shape=(x_train.shape[1], x_train.shape[2]))) model.add(Dense(4)) model.compile(loss='mae', optimizer='adam') # fit network history = model.fit(x_train, y_train, epochs=50, batch_size=8, validation_data=(x_test, y_test), verbose=2, shuffle=False) yhat = model.predict(x_test) res = sc.inverse_transform(yhat) ori = sc.inverse_transform(y_test) for i in range(len(x_test)): print(res[i], ori[i])<jupyter_output>[100.406364 127.31081 92.11021 52.34576 ] [ 98. 140. 91. 54.] [100.02752 131.31993 91.67709 52.708332] [100. 137. 100. 55.] [100.201645 150.70638 84.26031 54.461975] [100. 157. 81. 49.] [100.17899 151.38977 81.51754 51.71851] [100. 149. 88. 53.] [100.20788 147.73479 83.51148 52.796703] [100. 156. 68. 41.000004] [100.20917 150.45288 76.27941 47.108654] [100. 165. 76. 42.] [100.29156 154.3024 75.33917 44.5587 ] [100. 164. 68. 46.] [100.38052 155.50528 73.45059 44.303448] [100. 148. 79. 52.] [100.46072 149.07004 77.03934 48.016285] [100. 155. 74. 52.] [100.459785 149.14716 76.99821 48.733498] [100. 156. 65. 44.] [100.45213 150.54663 72.20945 45.74949] [100. 161. 77. 54.] [100.46304 151.48848 74.93573 48.318977] [100. 146. 70. 51.] [100.425995 146.92993 73.88365 49.241966] [100. 144. 68. 50.000004] [100.40475 143.87111 72.41773 49.0819 ] [100. 149. 59. 39.] [100.410484 145.[...]
no_license
/icu_patient.ipynb
shamim34/code_practice
1
<jupyter_start><jupyter_text># Playing with Encodings ## Week 1. Practice Programming Assignment 1 In this assignment we will check, that everything is set up so coursera graders work fine in programming assignments, and will practice python encoding functions a bit.##### Import coursera grader tools<jupyter_code>import sys sys.path.append("..") import grading grader = grading.Grader(assignment_key="BWTScac0SDGglNs7aK8CLA", all_parts=["EsbZk", "j9e6W", "k2flq", "2kPZj", "RUDvM"]) # token expires every 30 min COURSERA_EMAIL = '[email protected]' COURSERA_TOKEN = 'QjdaVYKaUEDH7yei'<jupyter_output><empty_output><jupyter_text>### Part 1. Number conversionIn this part you don't have to do anything but go through and submit an answer. Just checking that the grader works. Let's look at a character 'h'.<jupyter_code># we can find its code point number in decimal system ord('h') # we can convert it into a hexadicmal form # remember, that '0x' is just a prefix, meaning that this is a hexadecimal form of a number hex(104) # or we can conver it back to decimal int('68', 16) # we can convert it into a binary form # remember, that '0b' is just a prefix, meaning that this is a binary form of a number bin(104) # or back again int('1101000', 2) # and we can print any character by its code point print('\x68') print('\u0068') print('\U00000068')<jupyter_output>h h h <jupyter_text>What is letter 'h' code point number in hexadecimal form?<jupyter_code>answer_part_1 = 68<jupyter_output><empty_output><jupyter_text>What is letter 'h' code point number in binary form? <jupyter_code>answer_part_2 = 2 answer_part_2 = 1101000 # Setting our answers to grader. Do not change! grader.set_answer("EsbZk", answer_part_1) grader.set_answer("j9e6W", answer_part_2) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)<jupyter_output>Submitted to Coursera platform. See results on assignment page! <jupyter_text> ### Part 2. Decoding secret messagesIn this part you are given several byte-strings. These are some messages encoded by some of the encodings covered in lectures. I suggest you to try different encodings to decode the messages until you will get some meaningful result. The results should be submited via grader.<jupyter_code>secret_message_1 = b'\xff\xfeY\x00e\x00a\x00h\x00s\x00!\x00 \x00I\x00 '\ b'\x00d\x00e\x00c\x00o\x00d\x00e\x00d\x00 \x00t\x00h\x00e\x00 '\ b'\x00s\x00e\x00c\x00r\x00e\x00t\x00 \x00m\x00e\x00s\x00s\x00a'\ b'\x00g\x00e\x00' secret_message_1.decode('utf-16') # put decoded message here answer_part_3 = secret_message_1.decode('utf-16') # Setting our answers to grader. Do not change! grader.set_answer("k2flq", answer_part_3) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)<jupyter_output>Submitted to Coursera platform. See results on assignment page! 
<jupyter_text><jupyter_code>secret_message_2 = b"\xff\xfe\x00\x00T\x00\x00\x00o\x00\x00\x00 "\ b"\x00\x00\x00d\x00\x00\x00e\x00\x00\x00c\x00"\ b"\x00\x00o\x00\x00\x00d\x00\x00\x00e\x00\x00"\ b"\x00 \x00\x00\x00t\x00\x00\x00h\x00\x00\x00e"\ b"\x00\x00\x00 \x00\x00\x00t\x00\x00\x00h\x00"\ b"\x00\x00i\x00\x00\x00r\x00\x00\x00d\x00\x00"\ b"\x00 \x00\x00\x00s\x00\x00\x00e\x00\x00\x00c"\ b"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00t\x00\x00"\ b"\x00 \x00\x00\x00m\x00\x00\x00e\x00\x00\x00s\x00"\ b"\x00\x00s\x00\x00\x00a\x00\x00\x00g\x00\x00\x00e\x00"\ b"\x00\x00 \x00\x00\x00y\x00\x00\x00o\x00\x00\x00u\x00\x00"\ b"\x00 \x00\x00\x00s\x00\x00\x00h\x00\x00\x00o\x00\x00\x00u"\ b"\x00\x00\x00l\x00\x00\x00d\x00\x00\x00 \x00\x00\x00u\x00\x00"\ b"\x00s\x00\x00\x00e\x00\x00\x00 \x00\x00\x00'\x00\x00\x00s\x00"\ b"\x00\x00h\x00\x00\x00i\x00\x00\x00f\x00\x00\x00t\x00\x00"\ b"\x00_\x00\x00\x00j\x00\x00\x00i\x00\x00\x00s\x00\x00"\ b"\x00'\x00\x00\x00 \x00\x00\x00e\x00\x00\x00n\x00\x00\x00c\x00"\ b"\x00\x00o\x00\x00\x00d\x00\x00\x00i\x00\x00\x00n\x00\x00\x00g\x00\x00\x00" secret_message_2.decode('utf-32') # put decoded message here answer_part_4 = secret_message_2.decode('utf-32') # Setting our answers to grader. Do not change! grader.set_answer("2kPZj", answer_part_4) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)<jupyter_output>Submitted to Coursera platform. See results on assignment page! <jupyter_text><jupyter_code>secret_message_3 = b'\x83n\x83\x8d\x81[\x83\x8f\x81[\x83\x8b\x83h' secret_message_3.decode('shift_jis') # put decoded message here answer_part_5 = secret_message_3.decode('shift_jis') # Setting our answers to grader. Do not change! grader.set_answer("RUDvM", answer_part_5) # you can make submission with answers so far to check yourself at this stage grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)<jupyter_output>Submitted to Coursera platform. See results on assignment page!
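The notebook above decodes each secret message by trying encodings by hand, which is what the assignment asks for. As a side note, a third-party library such as `chardet` can often guess the encoding automatically; the tiny sketch below assumes `chardet` is installed, which the assignment does not require.

```python
# Optional: guess an encoding instead of trying candidates one by one.
# Requires the third-party `chardet` package (pip install chardet).
import chardet

guess = chardet.detect(secret_message_3)
print(guess)  # e.g. a dict with 'encoding' and 'confidence' keys
print(secret_message_3.decode(guess['encoding']))
```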
no_license
/data_scraping/week1/.ipynb_checkpoints/week01_encodings-checkpoint.ipynb
zavarovkv/hse-ds-2020
7
<jupyter_start><jupyter_text>## Adding features from IMDBpie<jupyter_code>movies['genres'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).genres) movies['runtime'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).runtime) movies['certification'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).certification) movies['plot'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).plots) movies['year'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).year) movies['tagline'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).tagline) movies['rating'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).rating) movies['type'] = movies['tconst'].apply(lambda x: imdb.get_title_by_id(x).type)<jupyter_output><empty_output><jupyter_text>## Adding features by transforming<jupyter_code>movies['ngenres'] = movies['genres'].apply(len) movies['isCrime'] = movies['genres'].apply(lambda x: 1 if 'Crime' in x else 0) genre_classes = ["Romance", "Thriller", "History", "Biography", "Adventure", "Fantasy", "Western", "Sci-Fi", "Family", "War", "Animation", "Horror", "Sport", "Mystery", "Music", "Drama", "Comedy", "Action"] for i in genre_classes: movies['is'+i] = movies['genres'].apply(lambda x: 1 if i in x else 0) movies['isGood'] = movies['rating'].apply(lambda x: 1 if x >= 8 else 0) np.mean(movies['rating']) movies.to_csv("~/Datasets/movies.csv", encoding='utf-8') movies.head() movies.columns movies['type'].value_counts() movies = movies[movies['type'] != "tv_series"] movies['type'].value_counts() movies['isGood'].value_counts() movies['certification'].value_counts() plt.hist(movies['rating']) plt.hist(movies['year']) for i in movies.columns: movies[i].describe() shark. ultraviolet = imdb.get_title_by_id('tt0370032') ultraviolet.plot_outline x = ['brixton', 'harvard', 'melville', 'r'] if "r" in x: print "yes" else: print "no" mov = pd.read_csv("~/Datasets/movies.csv") mov.head()<jupyter_output><empty_output>
no_license
/IMDB API.ipynb
cgeppig/DSI_week_03
2
<jupyter_start><jupyter_text># 2장 ### url 추출<jupyter_code># 지정한 URL 웹 페이지 추출 from urllib.request import urlopen f = urlopen('http://www.naver.com') # HTTPResponse 자료형의 객체 type(f) # 본문 추출 f.read()[:1000] #read(): Reads and returns the response body, or up to the next amt bytes. #한번 읽으면 다시 return 못하는 듯. # 상태 코드 추출 f.status<jupyter_output><empty_output><jupyter_text>### 인코딩 방식을 추출하고 그에 맞게 디코딩하기<jupyter_code># HTTP 헤더의 값 추출 -> Content-Type헤더를 참조하면 인코딩 방식을 알 수 있고, 이에 따라 문자코드(UTF-8, EUC-KR)를 지정해서 디코딩 f.getheader('Content-Type') # encoding 방식(utf-8) 추출 encoding = f.info().get_content_charset(failobj='UTF-8') encoding # 추출한 encoding방식으로 decoding f = urlopen('http://www.naver.com') text = f.read().decode(encoding) text[:1000]<jupyter_output><empty_output><jupyter_text>- Content-Type 헤더에서 인코딩 정보가 항상 맞는 것은 아니므로 - 위의 방법으로 UnicodeDecodeError가 발생하면 - meta 태그 또는 응답 본문의 바이트열도 확인하여 인코딩 방식을 결정해야한다. - `` 이런 식으로 앞부분에 적혀 있음.<jupyter_code>import re import sys f = urlopen("http://www.naver.com") bytes_content = f.read() # 앞부분에 대체로 meta태그가 있기 때문에 앞부분만 디코딩하고, 정규표현식으로 charset값을 추출 scanned_text = bytes_content[:1024].decode('ascii', errors='replace') scanned_text # re.search는 밑에서 자세히 match = re.search(r'charset=["\']?([\w-]+)', scanned_text) print(match) print(match.group(1)) bytes_content.decode(match.group(1))[:1000]<jupyter_output><empty_output><jupyter_text>### 스크레이핑 - 정규표현식 / XML파서 이용 - (궁금해서 찾아본) HTML과 XML 비교 [참고](https://www.crocus.co.kr/1493) + 둘 다 마크업 언어(태그 등을 이용하여 문서나 데이터의 구조를 명기하는 언어) + 목적이 다른데, * XML은 데이터 저장과 전송을 목적으로, * HTML은 데이터를 웹상에 표현하기 위한 목적. + 태그 차이도 존재 * HTML은 태그가 정해져있으나 XML은 미리 정의된 태그가 없음. * HTML은 태그 사양이 더 유연함. 생략을 한다던지. 웹 브라우저는 문법에 문제가 있는 HTML이라도 출력해줌. + 추가적으로 * HTML은 웹 환경에서 작동되는 언어, XML은 환경에 구애받지 않음. * HTML은 데이터+표현 동시에, XML은 데이터만.1. 정규표현식 [정규표현식 연습 사이트](https://pythex.org) - HTML을 문자열으로 취급. - 마크업되지 않은 웹 페이지도 스크레이핑 가능<jupyter_code># re모듈은 정규표현식 패턴의 \ 처리를 도와줌. re.search(r'a.*c', 'abc123DEF') # 두번째 매개변수에 첫번째 정규표현식 부분을, Match객체로 반환. 안맞으면 None return. re.search(r'a.*d', 'abc123DEF', re.IGNORECASE) # re.IGNORECASE : 대소문자 구분 무시 m = re.search(r'a(.*)c', 'abc123DEF') print(m.group(0)) # 0이면 모든 값 반환. print(m.group(1)) # 1이면 ()안에 있는 값 반환. 
re.findall(r'\w{2,}', 'This is a pen') #2글자 이상의 단어 추출 re.sub(r'\w{2,}', 'That', 'This is a pen') # 대체 f = urlopen("http://www.hanbit.co.kr/store/books/full_book_list.html") html = f.read().decode('utf-8') for partial_html in re.findall(r'<td class="left"><a.*?</td>', html, re.DOTALL)[:6] : print('html :', partial_html) url = re.search(r'<a href="(.*?)">', partial_html).group(1) url = 'http://www.hanbit.co.kr' + url print('url :',url) title = re.sub(r'<.*?>', '', partial_html) title = re.sub(r'&#40;', '(', title) title = re.sub(r'&#41;', ')', title) print('title :',title) print("-----------")<jupyter_output>html : <td class="left"><a href="/store/books/look.php?p_code=B6498472179">죽음의 부정</a></td> url : http://www.hanbit.co.kr/store/books/look.php?p_code=B6498472179 title : 죽음의 부정 ----------- html : <td class="left"><a href="/store/books/look.php?p_code=B8548562053">IT CookBook , MySQL로 배우는 데이터베이스 개론과 실습</a></td> url : http://www.hanbit.co.kr/store/books/look.php?p_code=B8548562053 title : IT CookBook , MySQL로 배우는 데이터베이스 개론과 실습 ----------- html : <td class="left"><a href="/store/books/look.php?p_code=B7446987985">숨은 그림 찾기&#40;똑똑한 두뇌연습 만 4~6세&#41;</a></td> url : http://www.hanbit.co.kr/store/books/look.php?p_code=B7446987985 title : 숨은 그림 찾기(똑똑한 두뇌연습 만 4~6세) ----------- html : <td class="left"><a href="/store/books/look.php?p_code=B5675781518">리얼 홍콩 마카오 [2019~2020년 개정판]</a></td> url : http://www.hanbit.co.kr/store/books/look.php?p_code=B5675781518 title : 리얼 홍콩 마카오 [2019~2020년 개정판] ----------- html : <td class="left"><a href="/store/books/look.php?p_code=B6797717479">Head First Agile : 개념부터[...]<jupyter_text>2. XML파서 - XML태그를 분석(파싱)하고 필요한 부분을 추출. - 정규표현식보다 간단하게 추출 가능. - HTML이 XML보다 유연하기 때문에 문제가 있는 HTML을 파싱하는데 어려움이 있을 수 있음. - HTML 파서는 없나 생각했는데, python 표준 모듈 html.parser모듈이 있다고. 그러나 번거롭다고 한다.- RSS란? 블로그나 뉴스 같은 웹사이트는 변경 정보등을 RSS라는 이름으로 제공. 이는 XML형식으로 제공됨. - 따라서, 스크레이핑하고 싶은 정보가 RSS로 제공되면 XML파서로 스크레이핑 가능.<jupyter_code>from xml.etree import ElementTree # parse로 파일을 읽음 tree = ElementTree.parse('rss.xml') tree # XML tree의 루트를 찾고 root = tree.getroot() root # 주어진 파일에서 시간과 날씨를 찾기위해 root에서 해당 태그로 내려감 # root > channel > item > description > body > location > data for item in root.findall('channel/item/description/body/location/data')[:6] : tm_ef = item.find('tmEf').text # text로 텍스트 값 저장 tmn = item.find('tmn').text tmx = item.find('tmx').text wf = item.find('wf').text print(tm_ef, tmn, tmx, wf)<jupyter_output>2019-08-21 00:00 24 30 구름많음 2019-08-21 12:00 24 30 흐리고 비 2019-08-22 00:00 23 30 구름많고 비 2019-08-22 12:00 23 30 구름많음 2019-08-23 00:00 22 30 구름많음 2019-08-23 12:00 22 30 구름많음
no_license
/파이썬을 이용한 웹 크롤링과 스크레이핑/2장.ipynb
jsj267/Book-notes
5
<jupyter_start><jupyter_text>## DeLong: Teaching Economics Last edited: 2019-10-12 # Deep Roots of Relative Development #### Due ???? via upload to ??? ### J. Bradford DeLong #### Derived from QuantEcon: Linear Regression in Python: &nbsp; You should have gotten to this point vis this link: &nbsp; ### Table of Contents 1. 2. 3. &nbsp;<jupyter_code>#libraries: !pip install linearmodels import numpy as np import matplotlib.pyplot as plt import pandas as pd import statsmodels.api as sm from statsmodels.iolib.summary2 import summary_col from linearmodels.iv import IV2SLS # inline graphics %matplotlib inline ajr_df = pd.read_csv('https://delong.typepad.com/files/ajr.csv') ajr_df.head()<jupyter_output><empty_output><jupyter_text>Let’s use a scatterplot to see whether any obvious relationship exists between GDP per capita and the protection against expropriation index:<jupyter_code>plt.style.use('seaborn') ajr_df.plot.scatter(x='avexpr', y='logpgp95') plt.show()<jupyter_output><empty_output><jupyter_text>Let's add three-letter country labels to the points:<jupyter_code>x = ajr_df['avexpr'] y = ajr_df['logpgp95'] labels = ajr_df['shortnam'] fig, ax = plt.subplots() ax.scatter(x, y, marker='.') for i, txt in enumerate(labels): ax.annotate(txt, (x.iloc[i], y.iloc[i])) plt.show() x<jupyter_output><empty_output><jupyter_text>Let's fit a linear model to this scatter: > (1) $ \ln(pgp_95)_i= β_0 + β_1(avexpr_i) + u_i $ * $ β_0 $ is the intercept of the linear trend line on the y-axis * $ β_1 $ is the slope of the linear trend line, representing the marginal association of protection against against expropriation risk with log GDP per capita * $ u_i $ is an error term. Fitting this linear model chooses a straight line that best fits the data in a least-squares, as in the following plot (Figure 2 in AJR):<jupyter_code># dropping NA's is required to use numpy's polyfit... # using only 'base sample' for plotting purposes... ajr_df = ajr_df.dropna(subset=['logpgp95', 'avexpr']) ajr_df = ajr_df[ajr_df['baseco'] == 1] x = ajr_df['avexpr'].tolist() y = ajr_df['logpgp95'].tolist() labels = ajr_df['shortnam'].tolist() fig, ax = plt.subplots() ax.scatter(x, y, marker='.') for i, txt in enumerate(labels): ax.annotate(txt, (x[i], y[i])) ax.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='black') ax.set_xlabel('Inverse Expropriation Risk Classification, 1985-95') ax.set_ylabel('Log GDP per capita 1995 (PPP)') ax.set_title('Figure 2: OLS Relationship: Prosperity and "Property Security Institutions"') plt.show()<jupyter_output><empty_output><jupyter_text>To estimate the constant term $ β_0 $, we need to add a column of 1’s to our dataframe so that we can use statsmodels's OLS routines:<jupyter_code>ajr_df['constant'] = 1 regression_1 = sm.OLS(endog=ajr_df['logpgp95'], exog=ajr_df[['constant', 'avexpr']], missing='drop') results_1 = regression_1.fit() print(results_1.summary())<jupyter_output> OLS Regression Results ============================================================================== Dep. Variable: logpgp95 R-squared: 0.540 Model: OLS Adj. R-squared: 0.533 Method: Least Squares F-statistic: 72.82 Date: Wed, 16 Oct 2019 Prob (F-statistic): 4.72e-12 Time: 11:23:22 Log-Likelihood: -68.168 No. 
Observations: 64 AIC: 140.3 Df Residuals: 62 BIC: 144.7 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]<jupyter_text>We extend our bivariate regression model to a multivariate regression model by adding in other factors correlated with $ \ln(pgp_95)_i $: * climate, as proxied by latitude * the different culture and history of different continents latitude is used to proxy this differences that affect both economic performance and institutions, eg. cultural, historical, etc.; controlled for with the use of continent dummies Let’s estimate some of the extended models considered in the paper (Table 2) using data from<jupyter_code>ajr2_df = pd.read_csv('https://delong.typepad.com/files/ajr2.csv') ajr2_df['constant'] = 1 X1 = ['constant', 'avexpr'] X2 = ['constant', 'avexpr', 'lat_abst'] X3 = ['constant', 'avexpr', 'lat_abst', 'asia', 'africa', 'other'] regression_2 = sm.OLS(ajr2_df['logpgp95'], ajr2_df[X1], missing='drop').fit() regression_3 = sm.OLS(ajr2_df['logpgp95'], ajr2_df[X2], missing='drop').fit() regression_4 = sm.OLS(ajr2_df['logpgp95'], ajr2_df[X3], missing='drop').fit() info_dict={'R-squared' : lambda x: f"{x.rsquared:.2f}", 'No. observations' : lambda x: f"{int(x.nobs):d}"} results_table = summary_col(results=[regression_2, regression_3, regression_4], float_format='%0.2f', stars = True, model_names=['Model 1', 'Model 3', 'Model 4'], info_dict=info_dict, regressor_order=['constant', 'avexpr', 'lat_abst', 'asia', 'africa']) results_table.add_title('Table 2 - OLS Regressions') print(results_table) # Dropping NA's is required to use numpy's polyfit df1_subset2 = ajr_df.dropna(subset=['logem4', 'avexpr']) X = df1_subset2['logem4'] y = df1_subset2['avexpr'] labels = df1_subset2['shortnam'] # Replace markers with country labels fig, ax = plt.subplots() ax.scatter(X, y, marker='') for i, label in enumerate(labels): ax.annotate(label, (X.iloc[i], y.iloc[i])) # Fit a linear trend line ax.plot(np.unique(X), np.poly1d(np.polyfit(X, y, 1))(np.unique(X)), color='black') ax.set_xlim([1.8,8.4]) ax.set_ylim([3.3,10.4]) ax.set_xlabel('Log of Settler Mortality') ax.set_ylabel('Average Expropriation Risk 1985-95') ax.set_title('Figure 3: First-stage relationship between settler mortality \ and expropriation risk') plt.show() df4 = pd.read_stata('https://github.com/QuantEcon/QuantEcon.lectures.code/raw/master/ols/maketable4.dta') df4 = df4[df4['baseco'] == 1] df4['const'] = 1 iv = IV2SLS(dependent=df4['logpgp95'], exog=df4['const'], endog=df4['avexpr'], instruments=df4['logem4']).fit(cov_type='unadjusted') print(iv.summary)<jupyter_output> IV-2SLS Estimation Summary ============================================================================== Dep. Variable: logpgp95 R-squared: 0.1870 Estimator: IV-2SLS Adj. R-squared: 0.1739 No. Observations: 64 F-statistic: 37.568 Date: Wed, Oct 16 2019 P-value (F-stat) 0.0000 Time: 11:23:24 Distribution: chi2(1) Cov. Estimator: unadjusted Parameter Estimates ============================================================================== Parameter Std. Err. T-stat P-value Lower CI Upper CI ----------------------------------------------------[...]<jupyter_text>---- &nbsp; ## Deep Roots of Relative Development ### Catch Our Breath—Further Notes: ---- * weblog support: * nbViewer: * datahub: &nbsp; ----<jupyter_code>pwt91_df = pd.read_csv('https://delong.typepad.com/files/pwt91-data.csv') pwt91_df.head()<jupyter_output><empty_output>
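Figure 3 above shows the first-stage relationship only as a scatter with a fitted line, and the notebook then reports the 2SLS estimate directly. A minimal sketch of the explicit first-stage regression behind that estimate, regressing `avexpr` on `logem4` with a constant using the `df4` dataframe already loaded, is shown below; it is illustrative rather than part of the original assignment.

```python
# First stage of the IV strategy: the instrument (log settler mortality)
# predicting the endogenous regressor (expropriation risk).
first_stage = sm.OLS(endog=df4['avexpr'],
                     exog=df4[['const', 'logem4']],
                     missing='drop').fit()
print(first_stage.summary())
# 2SLS effectively replaces avexpr with the fitted values from this regression
# in the second-stage regression of logpgp95 on institutions.
```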
non_permissive
/Deep-Roots-of-Relative-Development.ipynb
braddelong/LS2019
7
<jupyter_start><jupyter_text>## KNN Classifier multiple targets - near neighbors classifier## sample 1 - Iris database<jupyter_code>iris = load_iris() X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) from sklearn.preprocessing import StandardScaler # Create a StandardScater model and fit it to the training data X_scaler = StandardScaler().fit(X_train) X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) # Loop through different k values to see which has the highest accuracy # Note: We only use odd numbers because we don't want any ties train_scores = [] test_scores = [] for k in range(1, 20, 2): knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train_scaled, y_train) train_score = knn.score(X_train_scaled, y_train) test_score = knn.score(X_test_scaled, y_test) train_scores.append(train_score) test_scores.append(test_score) print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}") plt.plot(range(1, 20, 2), train_scores, marker='o') plt.plot(range(1, 20, 2), test_scores, marker="x") plt.xlabel("k neighbors") plt.ylabel("Testing accuracy Score") plt.show() # Note that k: 9 provides the best accuracy where the classifier starts to stablize knn = KNeighborsClassifier(n_neighbors=9) knn.fit(X_train_scaled, y_train) #print('k=9 Test Acc: %.3f' % knn.score(X_test_scaled, y_test)) #print("k=9 Test Acc: {}".format(knn.score(X_test_scaled, y_test))) print(f"k=9 Test Acc: {knn.score(X_test_scaled, y_test)}") <jupyter_output>k=9 Test Acc: 1.0 <jupyter_text>## prediction<jupyter_code>new_iris_data = [[4.3, 3.2, 1.3, 0.2]] predicted_class = knn.predict(new_iris_data) print(predicted_class) n2 = iris.data[10] import numpy as np n2_predicted_class = knn.predict(np.array([n2])) n2_predicted_class<jupyter_output><empty_output><jupyter_text>## sample 2 - Diabaties<jupyter_code>import pandas as pd df = pd.read_csv('data/diabetes.csv') df.head() yd = df["Outcome"] Xd = df.drop("Outcome", axis=1) Xd.head() Xd_train, Xd_test, yd_train, yd_test = train_test_split(Xd, yd, random_state=42) # Create a StandardScater model and fit it to the training data Xd_scaler = StandardScaler().fit(Xd_train) Xd_train_scaled = Xd_scaler.transform(Xd_train) Xd_test_scaled = Xd_scaler.transform(Xd_test) # Loop through different k values to see which has the highest accuracy # Note: We only use odd numbers because we don't want any ties train_scores = [] test_scores = [] for k in range(1, 20, 2): knn = KNeighborsClassifier(n_neighbors=k) knn.fit(Xd_train_scaled, yd_train) train_score = knn.score(Xd_train_scaled, yd_train) test_score = knn.score(Xd_test_scaled, yd_test) train_scores.append(train_score) test_scores.append(test_score) print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}") plt.plot(range(1, 20, 2), train_scores, marker='o') plt.plot(range(1, 20, 2), test_scores, marker="x") plt.xlabel("k neighbors") plt.ylabel("Testing accuracy Score") plt.show() # Note that k: 13 seems to be the best choice for this dataset knn = KNeighborsClassifier(n_neighbors=13) knn.fit(Xd_train_scaled, yd_train) print(f'k=13 Test Acc: {knn.score(Xd_test_scaled, yd_test)}')<jupyter_output>k=13 Test Acc: 0.6770833333333334
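The diabetes section above stops after choosing k = 13; unlike the iris example, it never shows a prediction on new data. Below is a small sketch of how one might classify a new record. The feature values are made up, the column order assumes the usual Pima diabetes layout, and the record must be scaled with the same `Xd_scaler` fitted on the training data.

```python
# Hypothetical new patient record, in the same column order as Xd
# (assumed: Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin,
#  BMI, DiabetesPedigreeFunction, Age).
new_patient = [[2, 120, 70, 25, 80, 28.5, 0.4, 35]]

# Reuse the scaler fitted on the training data before predicting
new_patient_scaled = Xd_scaler.transform(new_patient)
print(knn.predict(new_patient_scaled))        # 0 = no diabetes, 1 = diabetes
print(knn.predict_proba(new_patient_scaled))  # neighbour vote proportions
```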
no_license
/classification/.ipynb_checkpoints/Untitled-checkpoint.ipynb
tonyzhao668/machinelearning_practice
3
<jupyter_start><jupyter_text># BeautifulSoup 기초## BeautifulSoup 특징 * HTML과 XML 파일에서 데이터를 뽑아내기 위한 파이썬 라이브러리 * HTML과 XML의 트리 구조를 탐색, 검색, 변경 가능 * 다양한 파서(parser)를 선택하여 이용 가능 | 파서(parser) | 선언 | 장점 | 단점 | |---------------|------|------|------| | html.parser | `BeautifulSoup(markup, 'html.parser')` | 설치 필요 없음적절한 속도 | | | lxml HTML parser | `BeautifulSoup(markup, 'lxml')` | 매우 빠름 | lxml 추가 설치 필요 | | lxml XML parser | `BeautifulSoup(markup, 'lxml-xml')``BeautifulSoup(markup, 'xml')` | 매우 빠름유일한 xml parser | lxml 추가 설치 필요 | | html5lib | `BeautifulSoup(markup, 'html5lib')` | 웹 브라우저와 같은 방식으로 파싱유용한 HTML5 생성 | html5lib 추가 설치 필요매우 느림 |## HTML 파싱(Parsing)### 웹페이지 예제 생성<jupyter_code>%%writefile example.html <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>Heading 1</h1> <p>Paragraph</p> <div> <a href="www.google.com">google</a> </div> <div class="class1"> <p>a</p> <a href="www.naver.com">naver</a> <p>b</p> <p>c</p> </div> <div id="id1"> Example page <p>g</p> </div> </body> </html> from bs4 import BeautifulSoup import urllib.request with open("example.html") as fp: soup = BeautifulSoup(fp, 'html.parser') soup print(soup.prettify())<jupyter_output><!DOCTYPE html> <html> <head> <title> Page Title </title> </head> <body> <h1> Heading 1 </h1> <p> Paragraph </p> <div> <a href="www.google.com"> google </a> </div> <div class="class1"> <p> a </p> <a href="www.naver.com"> naver </a> <p> b </p> <p> c </p> </div> <div id="id1"> Example page <p> g </p> </div> </body> </html> <jupyter_text>### HTML 태그 파싱<jupyter_code>soup.title soup.title.name #title 태그 네임 soup.title.string #title 태그 속 string soup.title.parent.name #title의 parent 태그 네임 soup.h1 soup.p #가장 처음 p 태그 soup.div #가장 처음 div 태그 soup.a<jupyter_output><empty_output><jupyter_text>### HTML 태그 검색* `find()`: 해당 조건에 맞는 하나의 태그를 가져옴 * `find_all()`: 해당 조건에 맞는 모든 태그를 가져옴 * `select()`: CSS 선택자와 같은 형식으로 선택 가능<jupyter_code>soup_find = soup.find("div") print(soup_find) soup_find_all = soup.find_all("div") print(soup_find_all) #리스트 형태로 반환 find_by_id = soup.find_all('div', {'id':'id1'}) # {}: 속성값 print(find_by_id) #리스트 형태로 반환 find_by_class = soup.find_all('div', {'class':'class1'}) print(find_by_class) soup.find('a').get('href') #첫 번째 a 태그의 href 속성값 soup.find('a').get_text() site_names = soup.find_all('a') for name in site_names: print(name.get_text()) id1 = soup.select('div#id1') #css처럼, id는 '#'으로, class는 '.'으로 접근 id1 class1 = soup.select('div.class1') class1 class1_a = soup.select('div.class1 a') #또는 'div.class1 > a' class1_a<jupyter_output><empty_output><jupyter_text>## 웹페이지 콘텐츠 가져오기<jupyter_code>%%writefile anthem.html <!DOCTYPE html> <html> <head> </head> <body> <div> <p id="title">애국가</p> <p id="content"> 동해물과 백두산이 마르고 닳도록 하느님이 보우하사 우리나라 만세.<br /> 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.<br /> </p> <p id="content"> 남산 위에 저 소나무, 철갑을 두른 듯 바람 서리 불변함은 우리 기상일세.<br /> 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.<br /> </p> <p id="content"> 가을 하늘 공활한데 높고 구름 없이 밝은 달은 우리 가슴 일편단심일세.<br /> 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.<br /> </p> <p id="content"> 이 기상과 이 맘으로 충성을 다하여 괴로우나 즐거우나 나라 사랑하세.<br /> 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세.<br /> </p> </div> </body> </html> with open("anthem.html") as fp: soup = BeautifulSoup(fp, 'html.parser') soup title = soup.find('p', {'id':'title'}) contents = soup.find_all('p', {'id':'content'}) print(title.get_text()) for content in contents: print(content.get_text())<jupyter_output>애국가 동해물과 백두산이 마르고 닳도록 하느님이 보우하사 우리나라 만세. 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세. 남산 위에 저 소나무, 철갑을 두른 듯 바람 서리 불변함은 우리 기상일세. 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세. 
가을 하늘 공활한데 높고 구름 없이 밝은 달은 우리 가슴 일편단심일세. 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세. 이 기상과 이 맘으로 충성을 다하여 괴로우나 즐거우나 나라 사랑하세. 무궁화 삼천리 화려 강산 대한 사람, 대한으로 길이 보전하세. <jupyter_text>## 인터넷 웹페이지 가져오기<jupyter_code>url = "http://suanlab.com" html = urllib.request.urlopen(url).read() #해당 url의 웹페이지에 대한 html 문서 읽어옴 soup = BeautifulSoup(html, 'html.parser') soup labels = soup.find_all(['label']) for label in labels: print(label.get_text()) labels = soup.select('#wrapper > section > div > div > div > div > div > label') #구조선택자 관련된 부분은 지워줌 ex) div:nth-child(1) -> div for label in labels: print(label.get_text())<jupyter_output>[2020-05-20] "인공지능의 보안 위협" 칼럼 [2020-03-04] "데이터 경제 시대" 칼럼 [2019-12-25] "마이데이터 시대의 도래 데이터 주권과 새로운 가치" 칼럼 [2019-09-25] "유튜브 탄생과 크리에이터 시대" 칼럼 [2019-09-04] "농업으로 들어간 인공지능" 칼럼 [2019-08-07] "AI시대 지배할 것인가 지배당하며 살 것인가" 칼럼 [2018-12-30] "파이썬으로 텍스트 분석하기" 책 출판
no_license
/BS_BeautyfulSoup_기초.ipynb
howecofe/Web-Data-Programming
5
<jupyter_start><jupyter_text># Fully Connected Neural Network Notebook inspired by https://github.com/aymericdamien/TensorFlow-Examples/ Example is using the [MNIST database of handwritten digits](http://yann.lecun.com/exdb/mnist/)<jupyter_code># Import MINST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) import tensorflow as tf # Parameters learning_rate = 0.001 training_epochs = 15 batch_size = 100 display_step = 1 # Network Parameters n_hidden = 256 n_input = 784 # MNIST data input (img shape: 28*28) n_classes = 10 # MNIST total classes (0-9 digits) tf.reset_default_graph() # tf Graph input x = tf.placeholder("float", [None, n_input]) y = tf.placeholder("float", [None, n_classes])<jupyter_output><empty_output><jupyter_text>## Exercise 1 Define a function that builds a fully connected neural network. You will need to complete these steps: 1. define 4 `tf.Variable` with the appropriate shapes for W, b, W_out, b_out. Initialize them with random values. - define a super simple network with 1 layer that performs the operation: relu(x * W + b) - define an output layer that performs the operation: softmax(x * W_out + b_out) - encapsulate these in a function called `dnn` that takes `x` as input and returns the output layer<jupyter_code># Create model def dnn(x, n_hidden_1): # your code here # Construct model pred = dnn(x, n_hidden)<jupyter_output><empty_output><jupyter_text>What does the graph look like for this network?<jupyter_code>g = tf.get_default_graph() [op.name for op in g.get_operations()] # Define loss and optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Initializing the variables init = tf.global_variables_initializer() # Launch the graph with tf.Session() as sess: sess.run(init) # Training cycle for epoch in range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) and cost op (to get loss value) _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y}) # Compute average loss avg_cost += c / total_batch # Display logs per epoch step if epoch % display_step == 0: print "Epoch:", '%04d' % (epoch+1), "cost=", \ "{:.9f}".format(avg_cost) print "Optimization Finished!" # Test model correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) # Calculate accuracy accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})<jupyter_output><empty_output><jupyter_text>## Exercise 2 1. modify the `dnn` function adding a second hidden layer also with `relu` activation<jupyter_code># Create model def dnn_2(x, n_hidden_1, n_hidden_2): # your code here pred = dnn_2(x, 512, 256) # Define loss and optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) init = tf.global_variables_initializer() # Launch the graph with tf.Session() as sess: sess.run(init) # Training cycle for epoch in range(training_epochs): avg_cost = 0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) and cost op (to get loss value) _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y}) # Compute average loss avg_cost += c / total_batch # Display logs per epoch step if epoch % display_step == 0: print "Epoch:", '%04d' % (epoch+1), "cost=", \ "{:.9f}".format(avg_cost) print "Optimization Finished!" # Test model correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) # Calculate accuracy accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}) <jupyter_output><empty_output>
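Exercises 1 and 2 above leave the `dnn` and `dnn_2` bodies as `# your code here`. Below is a minimal sketch of one possible answer in the notebook's TF 1.x style; the random-normal initializers are a choice rather than a requirement, and note the comment about returning softmax outputs versus raw logits.

```python
# Sketch of one possible solution to Exercises 1 and 2 (TF 1.x API).
def dnn(x, n_hidden_1):
    W = tf.Variable(tf.random_normal([n_input, n_hidden_1]))
    b = tf.Variable(tf.random_normal([n_hidden_1]))
    W_out = tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
    b_out = tf.Variable(tf.random_normal([n_classes]))
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, W), b))
    # The exercise asks for a softmax output layer; note that the cost cell
    # applies softmax_cross_entropy_with_logits, which softmaxes again --
    # returning the raw logits here would be the more conventional choice.
    return tf.nn.softmax(tf.add(tf.matmul(layer_1, W_out), b_out))

def dnn_2(x, n_hidden_1, n_hidden_2):
    W1 = tf.Variable(tf.random_normal([n_input, n_hidden_1]))
    b1 = tf.Variable(tf.random_normal([n_hidden_1]))
    W2 = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]))
    b2 = tf.Variable(tf.random_normal([n_hidden_2]))
    W_out = tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
    b_out = tf.Variable(tf.random_normal([n_classes]))
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, W2), b2))
    return tf.nn.softmax(tf.add(tf.matmul(layer_2, W_out), b_out))
```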
no_license
/advanced_deep_learning/1d_fully_connected.ipynb
marionleborgne/ml
4
<jupyter_start><jupyter_text># List of Problems - [Problem midpoint](#problem_midpoint) - [Problem tableau](#problem_tableau) - [Problem Runge Kutta4](#problem_rk4) - [Problem embedded](#problem_embedded) - [Problem coding A](#prob_a) - [Problem coding B](#prob_b) - [Problem coding C](#prob_c) # Objectives In this lab, you will explore Runge-Kutta methods for solving ordinary differential equations. The goal is to gain a better understanding of some of the more popular Runge-Kutta methods and the corresponding numerical code. Specifically you will be able to: - describe the mid-point method - construct a Runge-Kutta tableau from equations or equations from a tableau - describe how a Runge-Kutta method estimates truncation error - edit a working Octave code to use a different method or solve a different problem# Readings There is no required reading for this lab, beyond the contents of the lab itself. However, if you would like additional background on any of the following topics, then refer to the sections indicated below. **Runge-Kutta Methods:** - Newman, Chapter 8 - Press, et al.  Section 16.1 - Burden & Faires  Section 5.4 # Solving Ordinary Differential Equations with the Runge-Kutta Methods Ordinary differential equations (ODEs) arise in many physical situations. For example, there is the first-order Newton cooling equation discussed in , and perhaps the most famous equation of all, the second-order Newton’s Second Law of Mechanics $F=ma$ . In general, higher-order equations, such as Newton’s force equation, can be rewritten as a system of first-order equations . So the generic problem in ODEs is a set of N coupled first-order differential equations of the form, $$ \frac{d{\bf y}}{dt} = f({\bf y},t) $$ where ${\bf y}$ is a vector of variables. For a complete specification of the solution, boundary conditions for the problem must be given. Typically, the problems are broken up into two classes: - **Initial Value Problem (IVP)**: the initial values of ${\bf y}$ are specified. - **Boundary Value Problem (BVP)**: ${\bf y}$ is specified at the initial and final times. For this lab, we are concerned with the IVP’s. BVP’s tend to be much more difficult to solve and involve techniques which will not be dealt with in this set of labs. Now as was pointed out in , in general, it will not be possible to find exact, analytic solutions to the ODE. However, it is possible to find an approximate solution with a finite difference scheme such as the forward Euler method . This is a simple first-order, one-step scheme which is easy to implement. However, this method is rarely used in practice as it is neither very stable nor accurate. The higher-order Taylor methods discussed in are one alternative but involve higher-order derivatives that must be calculated by hand or worked out numerically in a multi-step scheme. Like the forward Euler method, stability is a concern. The Runge-Kutta methods are higher-order, one-step schemes that makes use of information at different *stages* between the beginning and end of a step. They are more stable and accurate than the forward Euler method and are still relatively simple compared to schemes such as the multi-step predictor-corrector methods or the Bulirsch-Stoer method. 
Though they lack the accuracy and efficiency of these more sophisticated schemes, they are still powerful methods that almost always succeed for non-stiff IVPs.# The Midpoint Method: A Two-Stage Runge-Kutta Method The forward Euler method takes the solution at time $t_n$ and advances it to time $t_{n+1}$ using the value of the derivative $f(y_n,t_n)$ at time $t_n$ $$y_{n+1} = y_n + h f(y_n,t_n)$$ where $h \equiv \Delta t$.<jupyter_code>from IPython.display import Image Image(filename="images/euler.png")<jupyter_output><empty_output><jupyter_text>Figure euler: The forward Euler method is essentially a straight-line approximation to the solution, over the interval of one step, using the derivative at the starting point as the slope. The idea of the Runge-Kutta schemes is to take advantage of derivative information at the times between $t_n$ and $t_{n+1}$ to increase the order of accuracy. For example, in the midpoint method, the derivative at the initial time is used to approximate the derivative at the midpoint of the interval, $f(y_n+\frac{1}{2}hf(y_n,t_n), t_n+\frac{1}{2}h)$. The derivative at the midpoint is then used to advance the solution to the next step. The method can be written in two *stages* $k_i$, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+\frac{1}{2}k_1, t_n+\frac{1}{2}h)\ \ \rm{eq: midpoint}\\ y_{n+1} = y_n + k_2 \end{array} \end{aligned} $$ The midpoint method is known as a 2-stage Runge-Kutta formula. <jupyter_code>Image(filename='images/midpoint.png')<jupyter_output><empty_output><jupyter_text>Figure midpoint: The midpoint method again uses the derivative at the starting point to approximate the solution at the midpoint. The derivative at the midpoint is then used as the slope of the straight-line approximation.# Second-Order Runge-Kutta Methods As was shown in lab 2 , the error in the forward Euler method is proportional to $h$. In other words, the forward Euler method has an accuracy which is *first order* in $h$. The advantage of the midpoint method is that the extra derivative information at the midpoint results in the first order error term cancelling out, making the method *second order* accurate. This can be shown by a Taylor expansion of equation [eq: midpoint](#eq_midpoint) **Problem midpoint**: Even though the midpoint method is second-order accurate, it may still be less accurate than the forward Euler method. In the demo below, compare the accuracy of the two methods on the initial value problem $$ \frac{dy}{dt} = -y +t +1, \;\;\;\; y(0) =1\ \ \textbf{eq: linexp} $$ which has the exact solution $$ y(t) = t + e^{-t} $$1. Why is it possible that the midpoint method may be less accurate than the forward Euler method, even though it is a higher order method? 2. Based on the numerical solutions of (eq: linexp), which method appears more accurate? 3. Cut the stepsize in half and check the error at a given time. Repeat a couple of more times. How does the error drop relative to the change in stepsize? 4. How do the numerical solutions compare to $y(t) = t + e^{-t}$ when you change the initial time? 
Why?<jupyter_code>%matplotlib inline from numlabs.lab4.lab4_functions import initinter41,eulerinter41,midpointinter41 import numpy as np from matplotlib import pyplot as plt initialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.25,'c1':-1.,'c2':1.,'c3':1.} coeff = initinter41(initialVals) timeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt) nsteps=len(timeVec) ye=[] ym=[] y=coeff.yinitial ye.append(coeff.yinitial) ym.append(coeff.yinitial) for i in np.arange(1,nsteps): ynew=eulerinter41(coeff,y,timeVec[i-1]) ye.append(ynew) ynew=midpointinter41(coeff,y,timeVec[i-1]) ym.append(ynew) y=ynew analytic=timeVec + np.exp(-timeVec) theFig,theAx=plt.subplots(1,1) l1=theAx.plot(timeVec,analytic,'b-',label='analytic') theAx.set_xlabel('time (seconds)') l2=theAx.plot(timeVec,ye,'r-',label='euler') l3=theAx.plot(timeVec,ym,'g-',label='midpoint') theAx.legend(loc='best') theAx.set_title('interactive 4.1')<jupyter_output><empty_output><jupyter_text>In general, an *explicit* 2-stage Runge-Kutta method can be written as, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+b_{21}k_1, t_n+a_2h)\ \ \ \textbf{eq: explicitrk2} \\ y_{n+1} = y_n + c_1k_1 +c_2k_2 \end{array} \end{aligned} $$ The scheme is said to be *explicit* since a given stage does not depend *implicitly* on itself, as in the backward Euler method , or on a later stage. Other explicit second-order schemes can be derived by comparing the formula [eq: explicitrk2](#eq: explicitrk2) to the second-order Taylor method and matching terms to determine the coefficients $a_2$, $b_{21}$, $c_1$ and $c_2$. See [Appendix midpoint](#app_midpoint) for the derivation of the midpoint method.## The Runge-Kutta Tableau A general s-stage Runge-Kutta method can be written as, $$ \begin{aligned} \begin{array}{l} k_i = h f(y_n+ {\displaystyle \sum_{j=1}^{s} } b_{ij}k_j, t_n+a_ih), \;\;\; i=1,..., s\\ y_{n+1} = y_n + {\displaystyle \sum_{j=1}^{s}} c_jk_j \end{array} \end{aligned} $$ An *explicit* Runge-Kutta method has $b_{ij}=0$ for $i\leq j$, i.e. a given stage $k_i$ does not depend on itself or a later stage $k_j$. The coefficients can be expressed in a tabular form known as the Runge-Kutta tableau. $$ \begin{array}{|c|c|cccc|c|} \hline i & a_i &{b_{ij}} & & && c_i \\ \hline 1 & a_1 & b_{11} & b_{12} & ... & b_{1s} & c_1\\ 2 & a_2 & b_{21} & b_{22} & ... & b_{2s} & c_2\\ \vdots & \vdots & \vdots & \vdots & & \vdots & \vdots\\ s &a_s & b_{s1} & b_{s2} & ... & b_{ss} & c_s\\\hline {j=} & & 1 \ 2 & ... & s & \\ \hline \end{array} $$ An explicit scheme will be strictly lower-triangular. For example, a general 2-stage Runge-Kutta method, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n+b_{11}k_1+b_{12}k_2,t_n+a_1h)\\ k_2 = h f(y_n+b_{21}k_1+b_{22}k_2, t_n+a_2h)\\ y_{n+1} = y_n + c_1k_1 +c_2k_2 \end{array} \end{aligned} $$ has the coefficients, $$ \begin{array}{|c|c|cc|c|} \hline i & a_i & {b_{ij}} & & c_i \\ \hline 1 & a_1 & b_{11} & b_{12} & c_1\\ 2 & a_2 & b_{21} & b_{22} & c_2\\ \hline {j=} & & 1 & 2 & \\ \hline \end{array} $$ In particular, the midpoint method is given by the tableau, $$ \begin{array}{|c|c|cc|c|} \hline i & a_i & {b_{ij}} & & c_i \\ \hline 1 & 0 & 0 & 0 & 0\\ 2 & \frac{1}{2} & \frac{1}{2} & 0 & 1\\ \hline {j=} & & 1 & 2 & \\ \hline \end{array} $$ **Problem tableau**: Write out the tableau for 1. [Heun’s method](#eq_heuns) 2. the fourth-order Runge-Kutta method ([lab4:eq:rk4]) discussed in the next section. 
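To make the connection between a tableau and code concrete, here is a minimal sketch (not part of the original lab; the function and array names are illustrative) of a generic explicit Runge-Kutta step driven directly by the coefficients $a_i$, $b_{ij}$ and $c_j$, using the midpoint tableau above and the lab's test equation $dy/dt=-y+t+1$ for a scalar ODE:

```python
import numpy as np

def explicit_rk_step(f, y, t, h, a_coef, b_coef, c_coef):
    """Advance a scalar ODE y' = f(y, t) from t to t + h with the explicit
    Runge-Kutta method given by its tableau (a_i, b_ij, c_j)."""
    s = len(c_coef)
    k = np.zeros(s)
    for i in range(s):
        # explicit scheme: stage i only uses earlier stages (b_ij = 0 for j >= i)
        y_stage = y + np.dot(b_coef[i, :i], k[:i])
        k[i] = h * f(y_stage, t + a_coef[i] * h)
    return y + np.dot(c_coef, k)

# midpoint tableau from the text
a_coef = np.array([0.0, 0.5])
b_coef = np.array([[0.0, 0.0],
                   [0.5, 0.0]])
c_coef = np.array([0.0, 1.0])

f = lambda y, t: -y + t + 1.0   # test equation dy/dt = -y + t + 1
print(explicit_rk_step(f, 1.0, 0.0, 0.25, a_coef, b_coef, c_coef))
```

Swapping in a different explicit tableau (Heun's method, classic RK4) only changes the coefficient arrays, not the stepping code.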
## Explicit Fourth-Order Runge-Kutta Method Explicit Runge-Kutta methods are popular as each stage can be calculated with one function evaluation. In contrast, implicit Runge-Kutta methods usually involves solving a non-linear system of equations in order to evaluate the stages. As a result, explicit schemes are much less expensive to implement than implicit schemes. However, there are cases in which implicit schemes are necessary and that is in the case of *stiff* sets of equations. See section 16.6 of Press et al. for a discussion. For these labs, we will focus on non-stiff equations and on explicit Runge-Kutta methods. The higher-order Runge-Kutta methods can be derived by in manner similar to the midpoint formula. An s-stage method is compared to a Taylor method and the terms are matched up to the desired order. Methods of order $M > 4$ require $M+1$ or $M+2$ function evaluations or stages, in the case of explicit Runge-Kutta methods. As a result, fourth-order Runge-Kutta methods have achieved great popularity over the years as they require only four function evaluations per step. In particular, there is the classic fourth-order Runge-Kutta formula: $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+\frac{k_1}{2}, t_n+\frac{h}{2})\\ k_3 = h f(y_n+\frac{k_2}{2}, t_n+\frac{h}{2})\\ k_4 = h f(y_n+k_3, t_n+h)\\ y_{n+1} = y_n + \frac{k_1}{6}+ \frac{k_2}{3}+ \frac{k_3}{3} + \frac{k_4}{6} \end{array} \end{aligned} $$ **Problem rk4**: In the demo below, compare compare solutions to the test problem (eq: test) $$ \frac{dy}{dt} = -y +t +1, \;\;\;\; y(0) =1\ \ \ \mathbf{eq: test} $$ generated with the fourth-order Runge-Kutta method to solutions generated by the forward Euler and midpoint methods. 1. Based on the numerical solutions of (eq: test), which of the three methods appears more accurate? 2. Again determine how the error changes relative to the change in stepsize, as the stepsize is halved.<jupyter_code>from numlabs.lab4.lab4_functions import initinter41,eulerinter41,midpointinter41,\ rk4ODEinter41 initialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.05,'c1':-1.,'c2':1.,'c3':1.} coeff = initinter41(initialVals) timeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt) nsteps=len(timeVec) ye=[] ym=[] yrk=[] y=coeff.yinitial ye.append(coeff.yinitial) ym.append(coeff.yinitial) yrk.append(coeff.yinitial) for i in np.arange(1,nsteps): ynew=eulerinter41(coeff,y,timeVec[i-1]) ye.append(ynew) ynew=midpointinter41(coeff,y,timeVec[i-1]) ym.append(ynew) ynew=rk4ODEinter41(coeff,y,timeVec[i-1]) yrk.append(ynew) y=ynew analytic=timeVec + np.exp(-timeVec) theFig=plt.figure(0) theFig.clf() theAx=theFig.add_subplot(111) l1=theAx.plot(timeVec,analytic,'b-',label='analytic') theAx.set_xlabel('time (seconds)') l2=theAx.plot(timeVec,ye,'r-',label='euler') l3=theAx.plot(timeVec,ym,'g-',label='midpoint') l4=theAx.plot(timeVec,yrk,'m-',label='rk4') theAx.legend(loc='best') theAx.set_title('interactive 4.2')<jupyter_output><empty_output><jupyter_text>## Embedded Runge-Kutta Methods: Estimate of the Truncation Error It is possible to find two methods of different order which share the same stages $k_i$ and differ only in the way they are combined, i.e. the coefficients $c_i$. For example, the original so-called embedded Runge-Kutta scheme was discovered by Fehlberg and consisted of a fourth-order scheme and fifth-order scheme which shared the same six stages. 
In general, a fourth-order scheme embedded in a fifth-order scheme will share the stages $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+b_{21}k_1, t_n+a_2h)\\ \vdots \\ k_6 = h f(y_n+b_{51}k_1+ ...+b_{56}k_6, t_n+a_6h) \end{array} \end{aligned} $$ The fifth-order formula takes the step: $$ y_{n+1}=y_n+c_1k_1+c_2k_2+c_3k_3+c_4k_4+c_5k_5+c_6k_6 $$ while the embedded fourth-order formula takes a different step: $$ y_{n+1}^*=y_n+c^*_1k_1+c^*_2k_2+c^*_3k_3+c^*_4k_4+c^*_5k_5+c^*_6k_6 $$ If we now take the difference between the two numerical estimates, we get an estimate $\Delta_{\rm spec}$ of the truncation error for the fourth-order method, $$ \Delta_{\rm est}(i)=y_{n+1}(i) - y_{n+1}^{*}(i) = \sum^{6}_{i=1}(c_i-c_{i}^{*})k_i $$ This will prove to be very useful in the next lab where we provide the Runge-Kutta algorithms with adaptive stepsize control. The error estimate is used as a guide to an appropriate choice of stepsize. An example of an embedded Runge-Kutta scheme was found by Cash and Karp and has the tableau: $$ \begin{array}{|c|c|cccccc|c|c|} \hline i & a_i & {b_{ij}} & & & & & & c_i & c^*_i \\ \hline 1 & & & & & & & & \frac{37}{378} & \frac{2825}{27648}\\ 2 & \frac{1}{5} & \frac{1}{5}& & & & & & 0 &0 \\ 3 & \frac{3}{10} & \frac{3}{40}&\frac{9}{40}& & & & &\frac{250}{621}&\frac{18575}{48384}\\ 4 & \frac{3}{5}&\frac{3}{10}& -\frac{9}{10}&\frac{6}{5}& & & &\frac{125}{594}& \frac{13525}{55296}\\ 5 & 1 & -\frac{11}{54}&\frac{5}{2}&-\frac{70}{27}&\frac{35}{27}& & & 0 & \frac{277}{14336}\\ 6 & \frac{7}{8}& \frac{1631}{55296}& \frac{175}{512}&\frac{575}{13824}& \frac{44275}{110592}& \frac{253}{4096}& & \frac{512}{1771} & \frac{1}{4}\\\hline {j=} & & 1 & 2 & 3 & 4 & 5 & 6 & & \\ \hline \end{array} $$ **Problem embedded**: Though the error estimate is for the embedded fourth-order Runge-Kutta method, the fifth-order method can be used in practice for calculating the solution, the assumption being the fifth-order method should be at least as accurate as the fourth-order method. In the demo below, compare solutions of the test problem (eq: test2]) $$\frac{dy}{dt} = -y +t +1, \;\;\;\; y(0) =1\ \ \ \mathbf{eq: test2}$$ generated by the fifth-order method with solutions generated by the standard fourth-order Runge-Kutta method. Which method is more accurate? Again, determine how the error decreases as you halve the stepsizes. <jupyter_code>import numpy as np from matplotlib import pyplot as plt from numlabs.lab4.lab4_functions import initinter41,rk4ODEinter41,rkckODEinter41 initialVals={'yinitial': 1,'t_beg':0.,'t_end':1.,'dt':0.2,'c1':-1.,'c2':1.,'c3':1.} coeff = initinter41(initialVals) timeVec=np.arange(coeff.t_beg,coeff.t_end,coeff.dt) nsteps=len(timeVec) ye=[] ym=[] yrk=[] yrkck=[] y1=coeff.yinitial y2=coeff.yinitial yrk.append(coeff.yinitial) yrkck.append(coeff.yinitial) for i in np.arange(1,nsteps): ynew=rk4ODEinter41(coeff,y1,timeVec[i-1]) yrk.append(ynew) y1=ynew ynew=rkckODEinter41(coeff,y2,timeVec[i-1]) yrkck.append(ynew) y2=ynew analytic=timeVec + np.exp(-timeVec) theFig,theAx=plt.subplots(1,1) l1=theAx.plot(timeVec,analytic,'b-',label='analytic') theAx.set_xlabel('time (seconds)') l2=theAx.plot(timeVec,yrkck,'g-',label='rkck') l3=theAx.plot(timeVec,yrk,'m-',label='rk') theAx.legend(loc='best') theAx.set_title('interactive 4.3')<jupyter_output><empty_output><jupyter_text># moving from a notebook to a library If we want our ODE routines to be more generally useful, we need to lift two restrictions from the code: 1. 
Inital conditions are currently specified in the main script as a dictionary, e.g.: ```python initialVals={'yinitial': 1,'t_beg':0.,'t_end':1., 'dt':0.2,'c1':-1.,'c2':1.,'c3':1.} ``` and then converted to a named tuple in [initinter41](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/lab4_functions.py#L5-L9) We need to move this into an external configuration file that we can keep track of using version control, so we can keep the library code and the input and output files in separate folders and keep a record of our runs. 2. The derivatives are hardwired into the library, for example in [eulerinter41](https://github.com/phaustin/numeric/blob/3bab591fb584abbc95757eb40ae5c83dce3cb94a/numlabs/lab4/lab4_functions.py#L15-L17), . We need to be able to have integrators work with any derivative function. ## Writing a config file Python has a variety of configuration libraries, including [configparser](https://docs.python.org/3.4/library/configparser.html) in the standard library. This is overkill, however, for our simple programs. We just need a way to input and output a dictionary in human readable form. One example of how to do this is [write_init.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/write_init.py): ```python """ write the initial condition file for the simple oscillator example """ import json initialVals={'yinitial': [0.,1.],'t_beg':0.,'t_end':40.,'dt':0.1,'c1':0.,'c2':1.} initialVals['comment'] = 'written Sep. 29, 2015' initialVals['plot_title'] = 'simple damped oscillator run 1' with open('run_1.json','w') as f: f.write(json.dumps(initialVals,indent=4)) ``` When you run this from the command line or IPython with: In [19]: run write_init you get a json [Javascript Object Notation](https://en.wikipedia.org/wiki/JSON) file that looks like this: ```javascript { "t_beg": 0.0, "c1": 0.0, "c2": 1.0, "t_end": 40.0, "dt": 0.1, "plot_title": "simple damped oscillator run 1", "comment": "written Sep. 29, 2015", "yinitial": [ 0.0, 1.0 ] } ``` This format is simple enough to change with a text editor. To load this into a program, do something like [read_init.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/read_init.py): ```python import json from collections import namedtuple with open('run_1.json','r') as f: init_dict=json.load(f) print('as a dictionary:\n{}\n'.format(init_dict)) #either use this as a dict or convert to a namedtuple initvals=namedtuple('initvals','dt c1 c2 t_beg t_end yinitial comment plot_title') theCoeff=initvals(**init_dict) print('as a namedtuple:\n{}'.format(theCoeff)) ``` which produces: ``` In [21]: run read_init as a dictionary: {'yinitial': [0.0, 1.0], 'c2': 1.0, 'plot_title': 'simple damped oscillator run 1', 'comment': 'written Sep. 29, 2015', 't_end': 40.0, 'c1': 0.0, 't_beg': 0.0, 'dt': 0.1} as a namedtuple: initvals(dt=0.1, c1=0.0, c2=1.0, t_beg=0.0, t_end=40.0, yinitial=[0.0, 1.0], comment='written Sep. 29, 2015', plot_title='simple damped oscillator run 1') ``` ## Passing a derivative function to an integrator In python, functions are first class objects, which means you can pass them around like any other datatype, no need to get function handles as in matlab or Fortran. The integrators in [test.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/example/test.py) have been written to accept a derivative function of the form: ```python def derivs4(coeff, y): ``` i.e. 
as long as the derivative can be written in terms of coefficients and the previous value of y, the integrator will move the ode ahead one timestep. If we wanted coefficients that were a function of time, we would need to also include those functions the coeff namedtuple, and add keep track of the timestep through the integration. Here's an example using foward euler to integrate the harmonic oscillator<jupyter_code>%matplotlib inline import json from numlabs.lab4.example.test import read_init,euler4 # # specify the derivs function # def derivs(coeff, y): f=np.empty_like(y) #create a 2 element vector to hold the derivitive f[0]=y[1] f[1]= -1.*coeff.c1*y[1] - coeff.c2*y[0] return f # # first make sure we have an input file in this directory # initialVals={'yinitial': [0.,1.],'t_beg':0.,'t_end':40.,'dt':0.1,'c1':0.,'c2':1.} initialVals['comment'] = 'written Sep. 29, 2015' initialVals['plot_title'] = 'simple damped oscillator run 1' infile='run_1.json' with open(infile,'w') as f: f.write(json.dumps(initialVals,indent=4)) # # now read the initial information into a namedtuple coeff # infile='run_1.json' coeff=read_init(infile) # # integrate and save the result in savedata # time=np.arange(coeff.t_beg,coeff.t_end,coeff.dt) y=coeff.yinitial nsteps=len(time) savedata=np.empty([nsteps],np.float64) for i in range(nsteps): y=euler4(coeff,y,derivs) savedata[i]=y[0] theFig,theAx=plt.subplots(1,1,figsize=(8,8)) theAx.plot(time,savedata,'o-') theAx.set_title(coeff.plot_title) theAx.set_xlabel('time (seconds)') theAx.set_ylabel('y0') <jupyter_output><empty_output><jupyter_text> **problem coding A**: Try out [the lab4 example](https://github.com/phaustin/numeric/tree/master/numlabs/lab4/example): As set up above, test.py solved the damped, harmonic oscillator with the (unstable) forward Euler method. 1. Write a new routine that solves the harmonic oscilator using [Heun’s method](#eq_heuns) along the lines of the routines in [lab4_functions.py](https://github.com/phaustin/numeric/blob/master/numlabs/lab4/lab4_functions.py) Hand in a notebook with the code and a plot. **problem coding B**: 1. Now solve the following test equation by both the midpoint and Heun’s method and compare. $$f(y,t) = t - y + 1.0$$ Choose two sets of initial conditions and investigate the behaviour. 2. Is there any difference between the two methods when applied to either problem? Should there be? Explain by analyzing the steps that each method is taking. **problem coding C**: 6. Solve the Newtonian cooling equation of lab 1 by any of the above methods. 7. Hand in some sample plots along with the parameter values and initial conditions used.# Mathematical Notes ## Note on the Derivation of the Second-Order Runge-Kutta Methods A general s-stage Runge-Kutta method can be written as, $$ \begin{aligned} \begin{array}{l} k_i = h f(y_n+ {\displaystyle \sum_{j=1}^{s} } b_{ij}k_j, t_n+a_ih), \;\;\; i=1,..., s\\ y_{n+1} = y_n + {\displaystyle \sum_{j=1}^{s}} c_jk_j \end{array}\end{aligned}$$ where ${\displaystyle \sum_{j=1}^{s} } b_{ij} = a_i$. In particular, an *explicit* 2-stage Runge-Kutta method can be written as, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+ak_1, t_n+ah)\\ y_{n+1} = y_n + c_1k_1 +c_2k_2 \end{array} \end{aligned} $$ where $ b_{21} = a_2 \equiv a$. So we want to know what values of $a$, $c_1$ and $c_2$ leads to a second-order method, i.e. a method with an error proportional to $h^3$. 
To find out, we compare the method against a second-order Taylor expansion, $$ y(t_n+h) = y(t_n) + hy^\prime(t_n) + \frac{h^2}{2}y^{\prime \prime}(t_n) + O(h^3) $$ So for the $y_{n+1}$ to be second-order accurate, it must match the Taylor method. In other words, $c_1k_1 +c_2k_2$ must match $hy^\prime(t_n) + \frac{h^2}{2}y^{\prime \prime}$. To do this, we need to express $k_1$ and $k_2$ in terms of derivatives of $y$ at time $t_n$. First note, $k_1 = hf(y_n, t_n) = hy^\prime(t_n)$. Next, we can expand $k_2$ about $(y_n.t_n)$, $$ k_2 = hf(y_n+ak_1, t_n+ah) = h(f + haf_t + haf_yy^\prime + O(h^2)) $$ However, we can write $y^{\prime \prime}$ as, $$ y^{\prime \prime} = \frac{df}{dt} = f_t + f_yy^\prime$$ This allows us to rewrite $k_2$ in terms of $y^{\prime \prime}$, $$k_2 = h(y^\prime + hay^{\prime \prime}+ O(h^2))$$ Substituting these expressions for $k_i$ back into the Runge-Kutta formula gives us, $$y_{n+1} = y_n + c_1hy^\prime +c_2h(y^\prime + hay^{\prime \prime})$$ or $$y_{n+1} = y_n + h(c_1 +c_2)y^\prime + h^2(c_2a)y^{\prime \prime}$$ If we compare this against the second-order Taylor method, we see that we need, $$ \begin{aligned} \begin{array}{l} c_1 + c_2 = 1\\ a c_2 = \frac{1}{2} \end{array} \end{aligned} $$ for the Runge-Kutta method to be second-order. If we choose $a = 1/2$, this implies $c_2 = 1$ and $c_1=0$. This gives us the midpoint method. However, note that other choices are possible. In fact, we have a *one-parameter family* of second-order methods. For example if we choose, $a=1$ and $c_1=c_2=\frac{1}{2}$, we get the *modified Euler method*, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+k_1, t_n+h)\\ y_{n+1} = y_n + \frac{1}{2}(k_1 +k_2) \end{array} \end{aligned}$$ while the choice $a=\frac{2}{3}$, $c_1=\frac{1}{4}$ and $c_2=\frac{3}{4}$, gives us *Heun’s method*, $$ \begin{aligned} \begin{array}{l} k_1 = h f(y_n,t_n)\\ k_2 = h f(y_n+\frac{2}{3}k_1, t_n+\frac{2}{3}h)\\ y_{n+1} = y_n + \frac{1}{4}k_1 + \frac{3}{4}k_2 \end{array} \end{aligned} $$# Glossary - **driver** A routine that calls the other routines to solve the problem. - **embedded Runge-Kutta methods**: Two Runge-Kutta methods that share the same stages. The difference between the solutions give an estimate of the local truncation error. - **explicit** In an explicit numerical scheme, the calculation of the solution at a given step or stage does not depend on the value of the solution at that step or on a later step or stage. - **fourth-order Runge-Kutta method** A popular fourth-order, four-stage, explicit Runge-Kutta method. - **implicit**: In an implicit numerical scheme, the calculation of the solution at a given step or stage does depend on the value of the solution at that step or on a later step or stage. Such methods are usually more expensive than implicit schemes but are better for handling stiff ODEs. - **midpoint method** : A two-stage, second-order Runge-Kutta method. - **stages**: The approximations to the derivative made in a Runge-Kutta method between the start and end of a step. - **tableau** The tableau for a Runge-Kutta method organizes the coefficients for the method in tabular form. <jupyter_code> <jupyter_output><empty_output>
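As a companion to the derivation in the Mathematical Notes, the short sketch below (an addition, not part of the original lab) implements Heun's method with the coefficients $a=\frac{2}{3}$, $c_1=\frac{1}{4}$, $c_2=\frac{3}{4}$ and checks the second-order behaviour on the test equation $dy/dt=-y+t+1$: halving the step size should cut the error at a fixed time by roughly a factor of four.

```python
import numpy as np

def heun_step(f, y, t, h):
    # Heun's method: a = 2/3, c1 = 1/4, c2 = 3/4
    k1 = h * f(y, t)
    k2 = h * f(y + (2.0 / 3.0) * k1, t + (2.0 / 3.0) * h)
    return y + 0.25 * k1 + 0.75 * k2

def integrate(f, y0, t_end, h):
    # step from t = 0 to t_end with fixed step size h
    y, t = y0, 0.0
    while t < t_end - 1e-12:
        y = heun_step(f, y, t, h)
        t += h
    return y

f = lambda y, t: -y + t + 1.0
exact = lambda t: t + np.exp(-t)

for h in [0.1, 0.05, 0.025]:
    err = abs(integrate(f, 1.0, 1.0, h) - exact(1.0))
    print(f"h = {h:6.3f}   error at t=1: {err:.2e}")
```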
no_license
/lab4/lab4.ipynb
tjarnikova/numeric
7
<jupyter_start><jupyter_text>## Superposition of energy eigenstates Questions? [email protected] This notebook shows the time-evolution of the sum of two energy eigenstates for a particle in a box (in 1 dimension). This corresponds to problem 3 of chapter 12 of the Durham University Mathematical Physics II lecture notes.<jupyter_code>import numpy as np import matplotlib.pyplot as plt from matplotlib import animation, rc from IPython.display import HTML<jupyter_output><empty_output><jupyter_text>The normalised energy eigenfunctions for a particle in a box of unit size are $$\phi_n(x) = \sqrt{2}\sin(n\pi x)$$ and (setting $\hbar=1$) the energy eigenvalues are $$ E_n = \frac{1}{2} n^2 \pi^2\,.$$<jupyter_code>def phi(n, x): return np.sqrt(2)*np.sin(n*np.pi*x) def E(n): return n**2*np.pi**2/2<jupyter_output><empty_output><jupyter_text>Let's plot the first three for reference:<jupyter_code>x = np.linspace(0,1,100) plt.plot(x, phi(1,x), x,phi(2,x), x, phi(3,x),'-')<jupyter_output><empty_output><jupyter_text>We will look at a wave function which is a superposition of the 1st and 2nd energy eigenstates, $$ \psi(x) = \frac{1}{\sqrt{2}} \Big( \phi_1(x) + \phi_2(x) \Big)\,.$$ The time-evolution can be found by multiplying each term with its own $\exp(-iE t)$ factor, for the appropriate $E$. The probability density is also defined below, by taking the complex norm squared.<jupyter_code>def psi(x,t): return 1/np.sqrt(2)*(np.exp(-1j*E(1)*t)*phi(1,x) + np.exp(-1j*E(2)*t)*phi(2,x)) def P(x,t): return np.real_if_close(psi(x,t)*np.conj(psi(x,t)))<jupyter_output><empty_output><jupyter_text>For $t=0$ and $t=1$ the probability density looks as follows:<jupyter_code>plt.plot(x, P(x,0), x, P(x,1),'-')<jupyter_output><empty_output><jupyter_text>Let's make a movie. The block below sets up a figure environment with appropriate axes ranges $0\leq x\leq 1$ and $0\leq y \leq 3$.<jupyter_code>%%capture fig, ax = plt.subplots(); ax.set_xlim(( 0, 1)) ax.set_ylim(( 0, 3)) ourplot, = ax.plot([], [])<jupyter_output><empty_output><jupyter_text>To animate, we need to define a function which returns the `ourplot` figure for a given time $t$. It actually receives the frame number $i$, and we will set $t=i/50.0/f$ where $f$ is the frequency. 
So that at $i=50$, we have $t=T$ with $T$ the period of oscillation.<jupyter_code>fpp = 50 # number of animation frames per period w = E(2)-E(1) # angular frequency, see the problem in the notes freq = w/2/np.pi # frequency T = 1/freq # period def animate(i): xv = np.linspace(0, 3, 200) yv = P(xv, i*T/fpp) ourplot.set_data(xv, yv) return (ourplot,) T<jupyter_output><empty_output><jupyter_text>The plot is then made by constructing a `FuncAnimation` object, giving it the canvas object `fig` in which to draw, the function `animate` which returns the figure for a given frame, the number of frames, and the delay/interval (in ms) between each frame.<jupyter_code>anim = animation.FuncAnimation(fig, animate, frames=fpp, interval=50) HTML(anim.to_jshtml())<jupyter_output><empty_output><jupyter_text>Note how you can easily see that the expectation value of the position oscillates with frequency $\omega/(2\pi)$ (the duration of one run of the animation), where $\omega = E_2 - E_1$, as computed in the problem.## More complicated example A more complicated example consists of a superposition of 4 energy eigenstates, such as to approximate the evolution of $$\psi(x, t=0) = \sqrt{858} x (x-1)^5\, .$$ The expansion coefficients are stated below but can be obtained by projecting the above wave function on each eigenstate.<jupyter_code>c = [-0.622, -0.655, -0.370, -0.177] def psi2(x,t): tot = 0 for i in range(1,5): tot += c[i-1] * np.exp(-1j*E(i)*t)*phi(i,x) return tot def P2(x,t): return np.real_if_close(psi2(x,t)*np.conj(psi2(x,t))) plt.plot(x, np.real(psi2(x, 0)), 'r--', x, np.sqrt(858)*x*(x-1)**5, 'b-') fpp = 100 # number of animation frames per period T = 1 def animate2(i): xv = np.linspace(0, 3, 200) yv = P2(xv, i*T/fpp) ourplot.set_data(xv, yv) return (ourplot,) anim2 = animation.FuncAnimation(fig, animate2, frames=200, interval=50) HTML(anim2.to_jshtml())<jupyter_output><empty_output>
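The expansion coefficients quoted above can be checked numerically. The sketch below is an addition to the original notebook (it assumes `scipy` is available, which the notebook itself does not import); it projects $\psi(x,0)=\sqrt{858}\,x(x-1)^5$ onto the first four eigenfunctions, $c_n=\int_0^1 \phi_n(x)\,\psi(x,0)\,dx$, and should reproduce the listed values -0.622, -0.655, -0.370, -0.177.

```python
import numpy as np
from scipy.integrate import quad

def phi(n, x):
    # normalised eigenfunctions of the particle in a unit box
    return np.sqrt(2) * np.sin(n * np.pi * x)

def psi0(x):
    # initial wave function to be expanded
    return np.sqrt(858) * x * (x - 1)**5

# c_n = <phi_n | psi(., 0)> on the unit interval
for n in range(1, 5):
    c_n, _ = quad(lambda x: phi(n, x) * psi0(x), 0.0, 1.0)
    print(f"c_{n} = {c_n:.3f}")
```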
non_permissive
/sum_of_eigenstates.ipynb
kpeeters/quantum_notebooks
9
<jupyter_start><jupyter_text># DATA TRANSFORMATION <jupyter_code>import pandas as pd import numpy as np pcademo = pd.read_csv(r'G:\Kuliah\Data Mining\Transformasi Data\Praktek\pcademo.csv') x = pcademo.iloc[0:51,1:20] x.tail()<jupyter_output><empty_output>
no_license
/Transformasi Data/Latihan/Untitled.ipynb
rifqirabbanie/Data-Mining
1
<jupyter_start><jupyter_text>### Solutions of PS2 This notebook is in html. To be able to run it, please click: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/gunerilhan/economicgrowth/blob/master/ps2/ps2.ipynb)<jupyter_code># First import the packages we'll need # Numpy is for numerical analysis import numpy as np # Pandas is for data storage and manipulation import pandas as pd # Matplotlib and seaborn are for plotting import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns from matplotlib import style style.use('https://gunerilhan.github.io/img/fivethirtyeight-modified.mplstyle') # Different color palettes that can be used in plots colors = {0:['#264653','#219D8F','#E9C46A','#F4A261','#E76F51'], 1:['#003049','#D62828','#F77F00','#FCBF49','#EAE2B7'], 2:['#F72585','#7209B7','#3A0CA3','#4361EE','#4CC9F0'], 3: ['#165aa7','#cb495c','#bb60d5','#f47915','#06ab54','#002070','#b27d12','#007030']} <jupyter_output><empty_output><jupyter_text> 1. Go to the following website: https://www.rug.nl/ggdc/productivity/pwt/ Download Penn World Table version 10.0 database in your preferred format. Consider the following countries: the UK, Argentina, Denmark, Central African Republic, Nigeria and Italy. a. Using data in 2019, calculate real output per worker and human capital index for each of the countries listed above. Use - 'cgdpe' (Expenditure-side real GDP at current PPPs (in mil. 2017US$)) as a measure of real GDP, - 'emp' (Number of persons engaged (in millions)) as a measure of employment, - and 'hc' (Human capital index, based on years of schooling and returns to education) as a measure of human capital. b. Assume a Cobb-Douglas production function with $\alpha=1/3$. Suppose countries differ only with respect to their investment rates. For each country listed above, calculate Solow model's predicted output per worker relative to the UK output per worker. For investment rates of countries, use average of variable `csh\_i' (Share of gross capital formation at current PPPs) over the years 1970-2019. Generate a table comparing Solow model's predicted output per worker relative to the UK with actual output per worker relative to the UK (in 2019, or in the latest available year). Hint, I ask you to do an exercise similar to what we have done in lecture 3. Briefly comment on the table. c. Now, assume that countries differ with respect to their investment rates and employment growth rates. Assume that depreciation rates in all countries are equal to 5%, $\delta=0.05$. Calculate average annual employment growth rates of the above listed countries from 1970 to 2019. Repeat the exercise in part b. Create a table that compares Solow model's predicted output per worker differences with actual output per worker differences relative to the UK. Briefly comment on the table. d. Now, assume that countries differ not only in their investment and employment growth rates but also in their human capital. For this exercise use variable hc in 2019 (Human capital index, based on years of schooling and returns to education) as $h$ in the Solow model. Assume that depreciation rates in all countries are equal to 5%, $\delta=0.05$. Repeat part c while considering human capital differences across countries. Create a table and briefly comment on it. e.Do your predictions approach to actual income differences as you take into account more variables? 
<jupyter_code># Here, I download the Penn World Tables legend df_legend = pd.read_excel('https://www.rug.nl/ggdc/docs/pwt100.xlsx',sheet_name='Legend') df_legend.dropna(subset=['Variable name','Variable definition'],inplace=True) # I create a dictionary of variables names and definitions df_legend = dict(zip(df_legend['Variable name'],df_legend['Variable definition'])) # Here are the variables, we will need in this analysis print('cgdpe = ', df_legend['cgdpe']) print('rnna = ', df_legend['rnna']) print('emp = ', df_legend['emp']) print('hc =', df_legend['hc']) print('csh_i =', df_legend['csh_i']) # now download the data df = pd.read_excel('https://www.rug.nl/ggdc/docs/pwt100.xlsx',sheet_name='Data') # create GDP per worker df['gdpPerworker'] = df['cgdpe']/df.emp # Penn World Tables contain many variables, but these are the variables ... # I need to create the table asked in part a var_list = ['countrycode','country','cgdpe','emp','hc','year'] # List of countries country_list = ['GBR','ARG','DNK','CAF','NGA','ITA'] ## Here, we create the table asked in part a) # This cell does five things: # 1) It selects the countries in my list: df.countrycode.isin(country_list) # 2) It selects year 2017: (df.year==201-) # 3) It selects the variables that I want to display : # [['country','year','gdpPerworker','hc','emp','pop']] # 4) It rounds the variables to 2 decimal points: .round(decimals=2) # 5) Sets country as index: .set_index('country') df[df.countrycode.isin(country_list) & (df.year==2019)][ ['country','year','gdpPerworker','hc','emp','pop']].round(decimals=2) ## To solve part b, I first need to calculate average investment rate ... # for each country from 1970 to 2009 # This cell does the following things: # 1) It selects data from years 1970 to 2019: df[df.year.isin(np.arange(1970,2020))] # np.arange(1970,2020) creates a list from 1970 to 2019. # intervals on python is right-open. Hence np.arange(1970,2020) creates # an array that does not inclue 2020 # 2) it takes averages of the investment rates for each country: #.groupby(['countrycode','country'])['csh_i'].mean() # 3) it resets data index # 4) it renames 'csh_i' variable to "Investment rate" begin,end=1970,2019 df_fundamentals = (df[df.year.isin(np.arange(begin,end+1))] .groupby(['countrycode','country'])['csh_i'].mean().reset_index().rename( columns={'csh_i':'Investment rate'})) # Now look at he investment rate of the countries we calculated above # In fact we calculated invesment rate for each country in our data # we display investment rate only for the countries we are interested in df_fundamentals.set_index('countrycode',inplace=True) df_fundamentals.loc[country_list].round(decimals=2) # In part c, we need to calculate the average employment growth # first sort our data by country and by year # we need by year sorting to ensure that 1970 data comes earlier than 2017 data ... # in our dataset. We need this in the next cell df.sort_values(['countrycode','year'],inplace=True) # This cell calculates the average annual employment growth rate from 1970 to 2017 # It first selects the years 1970 and 2019 # For each country it calculate average annual employment growth rate ... # using this formula = (emp_2019/emp_1970)^(1/49)-1 # there are multiple ways of calculating average employment growth rate,... 
# the above is one of them # Here is how code works: # 1) it selects data from years 1970 and 2019: df[df.year.isin([1970,2019])] # 2) It groups the data based on countrycodes: .groupby(['countrycode'] # 3) for each country code, we have 2 observations, from 1970 and from 2019 # x['emp'].values[0] is the first observation from 1970 # x['emp'].values[1] is the second observation from 2019 # the average annual employment growth is equal to # x['emp'].values[1]/x['emp'].values[0])**(1/49)-1, ... # where x represent a country # 4) Lastly it renames what we calculated as # 'Employment growth': .rename(columns={0:'Employment growth'}) df_emp_growth = (df[df.year.isin([begin,end])] .groupby(['countrycode']).apply(lambda x: (x['emp'].values[1]/x['emp'].values[0])**(1/(end-begin))-1) .reset_index().rename(columns={0:'Employment growth'})) # show the employment growth rate for the countries we selected df_emp_growth.set_index('countrycode',inplace=True) df_emp_growth.loc[country_list].round(decimals=3)<jupyter_output><empty_output><jupyter_text>Above, since data for CAF is missing in 1970, our method did not give a result for CAF. Alternatively, we could calculate employment growth for each available year, and take average of the employment growth rates.<jupyter_code># merge df_fundamentals data (contains invesment rates) # with df_emp_growth (contains employment growth rate) df_fundamentals= df_fundamentals.join(df_emp_growth,how='left') # in part d, we will need human capital values # merge our df_fundamentals data with the Penn World Tables # but, we don't need all of PWT, select only year 2019, and other required variables df_fundamentals = df_fundamentals.join(df[df.year == end].set_index('countrycode')[ ['emp','gdpPerworker','hc']],how='left') # we will calculate Solow's predicted output per worker relative to the UK # hence, create a different data just for the UK values gbr = df_fundamentals.loc['GBR'] # now add columns to df_fundamentals data, consisting of corresponding values from the UK for var in ['Investment rate','Employment growth','hc','gdpPerworker']: df_fundamentals[f'{var}, GBR'] = gbr[var] # here is our data df_fundamentals.loc[country_list].round(decimals=3) # We have prepared our data to conduct the required analysis # First set our parameter values alpha=1/3 delta = 0.05 # In many parts of the analysis, we will write alpha/(1-alpha)... # create a new variable to redue typing alpham = alpha/(1-alpha)<jupyter_output><empty_output><jupyter_text>Notice that at the steady state of the Solow model income per worker is equal to $$y^\ast = A^{1/(1-\alpha)}\left(\frac{\gamma}{\delta+n} \right)^{\alpha/(1-\alpha)}h.$$ Therefore income per worker ratios are (assuming countries have the same technology, $A$): $$\frac{y_i}{y_{UK}} = \left(\frac{\frac{\gamma_i}{\delta+n_i}}{\frac{\gamma_{UK}}{\delta+n_{UK}}} \right)^{\alpha/(1-\alpha)}\frac{h_i}{h_{UK}}.$$ Rewrite the above formula: $$\frac{y_i}{y_{UK}} = \left(\frac{\gamma_i}{\gamma_{UK}}\right)^{\alpha/(1-\alpha)}\left(\frac{\delta+n_{UK}}{\delta+n_i}\right)^{\alpha/(1-\alpha)}\frac{h_i}{h_{UK}}.$$ In part b, we assume countries differ only with respect to their investment rates. 
Therefore: $$\frac{y_i}{y_{UK}} = \left(\frac{\gamma_i}{\gamma_{UK}}\right)^{\alpha/(1-\alpha)}.$$ <jupyter_code># calculate Solow's predicted income per worker ratios as in the above formula df_fundamentals['rel_GDP_pred_inv']=(df_fundamentals['Investment rate'] /df_fundamentals['Investment rate, GBR'])**alpham # calculate the actual output per worker rations from data df_fundamentals['rel_GDP'] = (df_fundamentals['gdpPerworker']/ df_fundamentals['gdpPerworker, GBR'])<jupyter_output><empty_output><jupyter_text>In part c, countries differ with respect to their employment growth rates as well as their investment rates: $$\frac{y_i}{y_{UK}} = \left(\frac{\frac{\gamma_i}{\delta+n_i}}{\frac{\gamma_{UK}}{\delta+n_{UK}}} \right)^{\alpha/(1-\alpha)}.$$ <jupyter_code>## calculate Solow's predicted income per worker ratios as in the above formula df_fundamentals['rel_GDP_pred_inv_emp']=((df_fundamentals['Investment rate']/ (delta+df_fundamentals['Employment growth'])) /(df_fundamentals['Investment rate, GBR']/ (delta+df_fundamentals['Employment growth, GBR'])))**alpham<jupyter_output><empty_output><jupyter_text>In part d, countries differ with respect to their human capital, employment growth rate and investment rate $$\frac{y_i}{y_{UK}} = \left(\frac{\frac{\gamma_i}{\delta+n_i}}{\frac{\gamma_{UK}}{\delta+n_{UK}}} \right)^{\alpha/(1-\alpha)}\frac{h_i}{h_{UK}}.$$ <jupyter_code>## calculate Solow's predicted income per worker ratios as in the above formula df_fundamentals['rel_GDP_pred_inv_emp_hc'] = (df_fundamentals['rel_GDP_pred_inv_emp']* df_fundamentals['hc']/df_fundamentals['hc, GBR']) # I need this cell to rename the table columns column_names = dict(zip(['rel_GDP_pred_inv','rel_GDP_pred_inv_emp','rel_GDP_pred_inv_emp_hc','rel_GDP'], ['Prediction, part b','Prediction, part c','Prediction, part d','Actual'])) df_fundamentals.loc[country_list] # here is Solow's predicted output per worker ratios under different assumptions results_1=(df_fundamentals[ ['rel_GDP_pred_inv','rel_GDP_pred_inv_emp','rel_GDP_pred_inv_emp_hc','rel_GDP']].rename( columns=column_names).round(decimals=2)) results_1.loc[country_list]<jupyter_output><empty_output><jupyter_text>In the above table, as we add more variables into our equation, Solow model's predicted income per worker ratios for Argentian, Central African Republic, and Nigeria are getting closer to its actual level, but not for other countries. <jupyter_code>fig,ax = plt.subplots(figsize=(6,6)) ax.scatter(df_fundamentals['rel_GDP_pred_inv'],df_fundamentals['rel_GDP'], sizes=df_fundamentals['emp'].values,color=colors[1][0],alpha=.7,label='$\gamma$') ax.scatter(df_fundamentals['rel_GDP_pred_inv_emp'],df_fundamentals['rel_GDP'], sizes=df_fundamentals['emp'].values,color=colors[1][1],alpha=.7,label='$\gamma,n$') ax.scatter(df_fundamentals['rel_GDP_pred_inv_emp_hc'],df_fundamentals['rel_GDP'], sizes=df_fundamentals['emp'].values,color=colors[1][2],alpha=.7,label='$\gamma,n,h$') ax.set_xlabel('Predicted') ax.set_ylabel('Actual') ticks = [0,.4,.8,1.2,1.6] ax.set_xlim(-0.2,1.7) ax.set_ylim(-0.2,1.7) ax.set_xticks(ticks) ax.set_yticks(ticks) ax.legend(loc='upper left',frameon=False) plt.savefig('./rel_GDP_pred_inv_emp_hc.svg',bbox_inches='tight')<jupyter_output><empty_output><jupyter_text>When we look at all the countries, the model does well as we control more and more variables. However, as shown in our table, the model does not do as well with 4 examples of advanced economies. 
We could conclude that the Solow model does in well in accounting for the income per worker differences between developed and developing countries. But, it does not do as well for accounting for the income per worker differences between developed countries, like the UK, Italy and Denmark listed in our question. Productivity differences is the main factor accounting the income per differences among the developed countries.2. Consider the extended Solow model. Suppose the production function is $Y = K^\alpha (ehL)^{1-\alpha}.$ a. Derive change in capital per effective worker, $\dot{\tilde{k}}$, as a function of capital per effective worker, $\tilde{k}$, and other exogenous variables in the model. b. Suppose that investment rate is 20%, $\gamma =.2$, depreciation rate is 5\%, $\delta=.05$, population growth rate is 1\%, $n=.01$, labor-augmenting technological progress rate is 2\%, $g = .02$, human capital is equal to 1, $h=1$, and capital income share is .33, $\alpha=.33$. Find steady state capital per effective worker, income per effective worker, consumption per effective worker. c. Suppose at time $t=0$, the economy is at the steady state, and level of labor-augmenting technology is equal to 1, $e(0)=1$. What's the income per worker level at time $t=20$? Remember that if a variable (say M) grows at a constant rate, say m, then the value of M at time $t$ is equal to $\exp(mt)$ times the value of M at time $0$, i.e. $M(t)=M(0)\exp(mt).$ d. Suppose again that at time $t=0$, the economy is at the steady state and level of labor-augmenting technology is equal to 1, $e(0)=1$. Now, suddenly (and unexpectedly) the human capital increases by 10\%, i.e. $h^{new} = 1.1$. Starting from the steady state you found in part b, simulate the model for 100 periods, and calculate capital per effective worker, capital per worker, income per effective worker, and income per worker at each time period. <jupyter_code># First, set our parameter values alpha = .33 delta = .05 h = 1 n = 0.01 g = 0.02 gamma = .2 # this is one way of defining a function in python # f is our production function, output per effective workers f = lambda k,h: k**alpha*h**(1-alpha) # this is our kdot function kdot = lambda k,h: gamma*k**alpha*h**(1-alpha)-(delta+n+g)*k<jupyter_output><empty_output><jupyter_text>At the steady state: $$\tilde{k}^\ast = \left(\frac{\gamma}{n+g+\delta}\right)^{1/(1-\alpha)}h $$ Everytime: $$ \tilde{y} = \tilde{k}^\alpha h^{1-\alpha} $$ $$ \tilde{c} = (1-\gamma)\tilde{y}$$<jupyter_code># calculate the steady state variables k_tilde_ss = (gamma/(n+delta+g))**(1/(1-alpha))*h y_tilde_ss = k_tilde_ss**alpha*h**(1-alpha) c_tilde_ss = y_tilde_ss*(1-alpha) print('Steady state capital per effective worker = ', np.round(k_tilde_ss,decimals=2)) print('Steady state ouput per effective worker = ', np.round(y_tilde_ss,decimals=2)) print('Steady state consumption per effective worker = ', np.round(c_tilde_ss,decimals=2))<jupyter_output>Steady state capital per effective worker = 3.93 Steady state ouput per effective worker = 1.57 Steady state consumption per effective worker = 1.05 <jupyter_text>Recall the definition of $\tilde{y} \equiv \frac{Y}{eL}$ and $y\equiv \frac{Y}{L}$. Therefore, $y=e\tilde{y}$. In part c, the economy is at the steady state, $\tilde{y}\ast$, and $e$ is growing at a constant rate. But we know the initial value of $e(0)=1$, the growth rate of $e$, $g=0.02$. Therefore, $e(20)=e(0)\exp(g*20).$ Hence, $y(20)=e(20)*\tilde{y}^\ast$. 
<jupyter_code>print('Income per worker at t=20 is equal to', np.round(np.exp(g*20)*y_tilde_ss,decimals=2))<jupyter_output>Income per worker at t=20 is equal to 2.34 <jupyter_text>To solve for part d, we first need to simulate $\tilde{k}$ and $\tilde{y}$ over time. We can quite easily calculate $e$ over time using the initial value of $e$, $e(0)$, and the growth rate of $e$, $g=0.02$. Then using $y(t)=\tilde{y(t)}e(t)$ equality, we can generete $y$ sequence over time.<jupyter_code># k_tilde_seq will be sequence of k tilde over time # since the economy was at the steady state, I initiate k_tilde sequence with 10 values... # all equal to the steady state value # you assume this is the value of k_tilde before time t=0 and at time t=0,... # as there is no change in k_tilde at time t=0. k_tilde will begin increasing at t=1 k_tilde_seq = [k_tilde_ss,]*10 # I also create a sequence of human capital # h is equal to 1 initially, then it becomes 1.1 h_seq = np.ones(111) # assume 9th element of the sequence corresponds to time t=0 h_seq[9:] = 1.1 # starting from the 9th element, or time t=0, simulate the model to get k_tilde over time for t in range(9,110): # k_prime is the next periods capital # k_prime is equal to current capital plus the change in capital k_prime = k_tilde_seq[t]+kdot(k_tilde_seq[t],h_seq[t]) k_tilde_seq.append(k_prime) # generate e sequence as given in the formula: e(t) = e(0)*exp(g*t) e_seq = [np.exp(t*g) for t in range(-9,102)] # k = k_tilde*e k_seq = np.array(k_tilde_seq)*np.array(e_seq) # y_tilde = k_tilde^alpha*h^(1-alpha) y_tilde_seq = [f(k_tilde_seq[t],h_seq[t]) for t in range(111)] # y = y_tilde*e y_seq = np.array(y_tilde_seq)*np.array(e_seq) # now put all these variables into a table df2 = pd.DataFrame({'Time':np.arange(-9,102), 'h':h_seq, 'k tilde':k_tilde_seq, 'y tilde':y_tilde_seq, 'e':e_seq, 'k':k_seq, 'y':y_seq}) # here is how our data looks like df2.head(15) df2[df2.Time==60].round(decimals=2) # plot k_tilde over time fig,ax = plt.subplots() plt.plot(df2.Time,df2['k tilde'],'k',linewidth=2) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_xlabel('Time') ax.set_title(r'$\tilde{k}$') # plot y_tilde over time fig,ax = plt.subplots() plt.plot(df2.Time,df2['y tilde'],'k.',linewidth=2) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_xlabel('Time') ax.set_title(r'$\tilde{y}$') # plot k over time yticks = [5,10,20,40] fig,ax = plt.subplots() plt.plot(df2.Time,df2['k'],'k',linewidth=2) plt.plot(df2.Time,k_tilde_ss*np.array(e_seq),'k--',linewidth=2) ax.set_yscale('log') ax.set_ylim(ymax=50) ax.set_yticks(yticks) ax.set_yticklabels(yticks) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_xlabel('Time') ax.set_title(r'$k$') # plot y over time yticks = [2,4,8,16] fig,ax = plt.subplots() plt.plot(df2.Time,df2['y'],'k.',linewidth=2) plt.plot(df2.Time,y_tilde_ss*np.array(e_seq),'k--',linewidth=2) ax.set_yscale('log') ax.set_ylim(ymax=20) ax.set_yticks(yticks) ax.set_yticklabels(yticks) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_xlabel('Time') ax.set_title(r'$y$') # plot growth rate of y over time fig,ax = plt.subplots() 
plt.plot(df2.Time,np.log(df2['y']).diff(),'k.',linewidth=2) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_ylim(ymax= .023) ax.set_xlabel('Time') ax.set_title(r'$\dot{y}/y$')<jupyter_output><empty_output>
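As a cross-check on the part d simulation (an addition to the original solution), the new steady state after the 10% increase in human capital can be computed directly from the steady-state formula; the simulated $\tilde{k}$ path above should level off at this value, since $\tilde{k}^\ast$ is linear in $h$.

```python
import numpy as np

# parameter values from the problem
alpha, delta, n, g, gamma = 0.33, 0.05, 0.01, 0.02, 0.2

h_old, h_new = 1.0, 1.1
k_tilde_old = (gamma / (n + g + delta))**(1 / (1 - alpha)) * h_old
k_tilde_new = (gamma / (n + g + delta))**(1 / (1 - alpha)) * h_new

print('old steady state k tilde:', np.round(k_tilde_old, 2))   # about 3.93, as in part b
print('new steady state k tilde:', np.round(k_tilde_new, 2))   # exactly 10% higher
```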
no_license
/ps2/.ipynb_checkpoints/ps2-checkpoint.ipynb
anhnguyendepocen/economicgrowth
11
<jupyter_start><jupyter_text># Exercise 12 ## Analyze how travelers expressed their feelings on Twitter A sentiment analysis job about the problems of each major U.S. airline. Twitter data was scraped from February of 2015 and contributors were asked to first classify positive, negative, and neutral tweets, followed by categorizing negative reasons (such as "late flight" or "rude service").### **Juan Camilo Florez 201620135** ### **Fernando Perez 200222809** ### **Jhon Florez 201920529** ### **Angie Paola Chacón 201012536**<jupyter_code>import pandas as pd import numpy as np from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier %matplotlib inline import matplotlib.pyplot as plt # read the data and set the datetime as the index tweets = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/Tweets.zip', index_col=0) tweets.head() tweets.shape<jupyter_output><empty_output><jupyter_text>### Proportion of tweets with each sentiment<jupyter_code>tweets['airline_sentiment'].value_counts()<jupyter_output><empty_output><jupyter_text>### Proportion of tweets per airline <jupyter_code>tweets['airline'].value_counts() pd.Series(tweets["airline"]).value_counts().plot(kind = "bar",figsize=(8,6),rot = 0) pd.crosstab(index = tweets["airline"],columns = tweets["airline_sentiment"]).plot(kind='bar',figsize=(10, 6),alpha=0.5,rot=0,stacked=True,title="Sentiment by airline")<jupyter_output><empty_output><jupyter_text># Exercise 12.1 Predict the sentiment using CountVectorizer; use a Random Forest classifier<jupyter_code>from sklearn.model_selection import train_test_split, cross_val_score from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.ensemble import RandomForestClassifier from nltk.stem.snowball import SnowballStemmer from nltk.stem import WordNetLemmatizer from sklearn.naive_bayes import MultinomialNB X = tweets['text'] y = tweets['airline_sentiment'].map({'negative':-1,'neutral':0,'positive':1}) <jupyter_output><empty_output><jupyter_text>**Create sparse matrices of tokens with the CountVectorizer function**<jupyter_code>vect = CountVectorizer(lowercase=True) X_dtm = vect.fit_transform(X) df=X_dtm.todense() df print(df.shape)<jupyter_output>(14640, 15051) <jupyter_text>The CountVectorizer function creates a sparse matrix with 15051 columns (words).
In this case, no cleaning has been applied to the text. **List of tokens**<jupyter_code>print(vect.get_feature_names())<jupyter_output>['00', '000', '000114', '000419', '000ft', '000lbs', '0011', '0016', '00a', '00am', '00p', '00pm', '01', '0162389030167', '0162424965446', '0162431184663', '0167560070877', '0185', '01ldxn3qqq', '01pm', '02', '0200', '03', '0316', '0372389047497', '04', '0400', '04sdytt7zd', '05', '0510', '0530', '05am', '05pm', '06', '0600', '0638', '0671', '07', '0736', '0769', '07p', '07xhcacjax', '08', '0985', '0_0', '0bjnz4eix5', '0cevy3p42b', '0ewj7oklji', '0hmmqczkcf', '0hxlnvzknp', '0jjt4x3yxg', '0jutcdrljl', '0kn7pjelzl', '0liwecasoe', '0pdntgbxc6', '0prgysvurm', '0wbjawx7xd', '0xjared', '10', '100', '1000', '1000cost', '1001', '1002', '1007', '1008', '101', '1016', '1019', '1020', '1024', '1025', '1027', '1028', '103', '1030pm', '1032', '1038', '104', '1041', '1046', '105', '1050', '1051', '1058', '106', '1065', '1071', '1074', '1079871763', '108', '1080', '1081', '1086', '108639', '1089', '1098', '1099', '10a', '10am', '10d', '10f', '10hrs', '10m', '10min', '10mins', '10p', '10pm', '10th', '[...]<jupyter_text>**Random Forest model**<jupyter_code># Create train and test sets X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=42) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_squared_error from sklearn import metrics clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') clf clf.fit(X_train, y_train) y_pred=clf.predict(X_test) acc_vect_constpw =metrics.accuracy_score(y_test, y_pred) print("Accuracy:", acc_vect_constpw)<jupyter_output>Accuracy: 0.7666211293260473 <jupyter_text>**Confusion matrix for the predicted tweet sentiment: 'negative':-1, 'neutral':0, 'positive':1**<jupyter_code>print(pd.crosstab(y_test, y_pred, rownames=['Actual sentiment'], colnames=['Predicted sentiment']))<jupyter_output>Predicted sentiment -1 0 1 Actual sentiment -1 2736 62 16 0 533 316 35 1 308 71 315 <jupyter_text>The model that keeps the stopwords reaches an accuracy of 0.76. # Exercise 12.2 Remove stopwords, then predict the sentiment using CountVectorizer;
use a Random Forest classifier<jupyter_code>vect2 = CountVectorizer(lowercase= True, stop_words='english') # compute the tokens and remove English stopwords X_dtm2 = vect2.fit_transform(X) # build the document-term matrix df2=X_dtm2.todense() df2 print(df2.shape)<jupyter_output>(14640, 14770) <jupyter_text>When the stopwords are removed in the CountVectorizer function, the resulting sparse matrix has 14770 columns (words). **Random Forest model for CountVectorizer without stopwords**<jupyter_code># Create train and test sets X_train2, X_test2, y_train2, y_test2 = train_test_split(df2, y, test_size=0.3, random_state=42) X_train2.shape clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') clf clf.fit(X_train2, y_train2) y_pred2=clf.predict(X_test2) acc_vect_sinstpw = metrics.accuracy_score(y_test2, y_pred2) print("Accuracy:",acc_vect_sinstpw)<jupyter_output>Accuracy: 0.7497723132969034 <jupyter_text>**Confusion matrix for the predicted tweet sentiment: 'negative':-1, 'neutral':0, 'positive':1**<jupyter_code>print(pd.crosstab(y_test2, y_pred2, rownames=['Actual sentiment'], colnames=['Predicted sentiment']))<jupyter_output>Predicted sentiment -1 0 1 Actual sentiment -1 2720 45 49 0 614 209 61 1 285 45 364 <jupyter_text>The model without stopwords reaches an accuracy of 0.74, which is lower than the model that keeps the stopwords (0.76). This may be because some of the words being removed carry relevant information for predicting the sentiment of the tweet. # Exercise 12.3 Increase the n-gram size (with and without stopwords), then predict the sentiment using CountVectorizer; use a Random Forest classifier**Random Forest model, iterating over n-gram sizes, removing stopwords** <jupyter_code># removing English stopwords gram = range(1,6) acc1=[] clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') for i in gram: vect3 = CountVectorizer(ngram_range=(1, i), lowercase= True, stop_words='english') X_dtm3 = vect3.fit_transform(X) X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42) clf3.fit(X_train3, y_train3) y_pred3 = clf3.predict(X_test3) acc = metrics.accuracy_score(y_test3, y_pred3) acc1.append([i, acc]) acc1 <jupyter_output><empty_output><jupyter_text>**Random Forest model, iterating over n-gram sizes, keeping stopwords** <jupyter_code># stopwords kept (no stop_words option) gram = range(1,6) acc2=[] clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') for i in gram: vect3 = CountVectorizer(ngram_range=(1, i), lowercase= True) X_dtm3 = vect3.fit_transform(X) X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42) clf3.fit(X_train3, y_train3) y_pred3 = clf3.predict(X_test3) acc = metrics.accuracy_score(y_test3, y_pred3) acc2.append([i, acc]) acc2 <jupyter_output><empty_output><jupyter_text>When iterating over the n-gram size in the Random Forest models with and without stopwords, using the CountVectorizer function, the performance of the model tends to decrease as the n-gram size grows. We therefore recommend working with a dictionary of single words as tokens. # Exercise 12.4 Predict the sentiment using TfidfVectorizer;
use Random Forest classifier### Random forest con TfidfVectorizer eliminando stopwords<jupyter_code># clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') vect4 = TfidfVectorizer(lowercase= True, stop_words='english') X_dtm4 = vect4.fit_transform(X) X_train4, X_test4, y_train4, y_test4 = train_test_split(X_dtm4, y, test_size=0.3, random_state=42) clf3.fit(X_train4, y_train4) y_pred4 = clf3.predict(X_test4) acc_tfidf_sinstw = metrics.accuracy_score(y_test4, y_pred4) acc_tfidf_sinstw <jupyter_output><empty_output><jupyter_text>### Random forest con TfidfVectorizer sin eliminar stopwords<jupyter_code>clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') vect5 = TfidfVectorizer( lowercase= True) X_dtm5 = vect5.fit_transform(X) X_train5, X_test5, y_train5, y_test5 = train_test_split(X_dtm5, y, test_size=0.3, random_state=42) clf3.fit(X_train5, y_train5) y_pred5 = clf3.predict(X_test5) acc_tfidf_constw = metrics.accuracy_score(y_test5, y_pred5) acc_tfidf_constw<jupyter_output><empty_output><jupyter_text>En cuanto a los modelos utilizando tfidvectorizer, el modelo que tiene un mejor desempeño es el randomforest sin eliminar stopwords, con un accuracy de 0,71. ### Random forest con TfidfVectorizer eliminando stopwords, iterando enegramas<jupyter_code>#Sin la función stopwords gram = range(1,6) acc3=[] clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') for i in gram: vect3 = TfidfVectorizer(ngram_range=(1, i), lowercase= True) X_dtm3 = vect3.fit_transform(X) X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42) clf3.fit(X_train3, y_train3) y_pred3 = clf3.predict(X_test3) acc_tfid_sinst = metrics.accuracy_score(y_test3, y_pred3) acc3.append([i,acc_tfid_sinst]) acc3 <jupyter_output><empty_output><jupyter_text>### Random forest con TfidfVectorizer sin eliminar stopwords, iterando enegramas<jupyter_code>#Sin la función stopwords gram = range(1,6) acc4 = [] clf3 = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100, random_state=123, criterion = 'entropy') for i in gram: vect3 = TfidfVectorizer(ngram_range=(1, i), lowercase= True, stop_words='english') X_dtm3 = vect3.fit_transform(X) X_train3, X_test3, y_train3, y_test3 = train_test_split(X_dtm3, y, test_size=0.3, random_state=42) clf3.fit(X_train3, y_train3) y_pred3 = clf3.predict(X_test3) acc_tfid_const = metrics.accuracy_score(y_test3, y_pred3) acc4.append([i, acc_tfid_const]) acc4<jupyter_output><empty_output><jupyter_text>Al iterar el número enegramas en los modelos Random Forest con y sin stopwords, utilizando la función TfidfVectorizer para calcular la frecuencia relativa, se puede observar que el desempeño del modelo tiende a disminuir, a medida que aumenta el tamaño del enegrama. Por lo cual, se recomienda trabajar con un diccionario solo de palabras como tokens. 
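The n-gram comparisons above refit the vectorizer and the Random Forest by hand for each configuration. As a sketch of how the same comparison could be run more systematically (assuming the same `X` and `y` already loaded in this notebook; the grid values below are illustrative, not taken from the original), a scikit-learn `Pipeline` plus `GridSearchCV` keeps the vectorizer and classifier settings together and cross-validates every combination:<jupyter_code>from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier

# Vectorizer + classifier in one estimator, so each grid point is refit consistently
pipe = Pipeline([
    ('vect', CountVectorizer(lowercase=True)),
    ('clf', RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=100,
                                   random_state=123, criterion='entropy')),
])

# Illustrative grid: unigrams vs. unigrams+bigrams, with and without English stopwords
param_grid = {
    'vect__ngram_range': [(1, 1), (1, 2)],
    'vect__stop_words': [None, 'english'],
}

search = GridSearchCV(pipe, param_grid, scoring='accuracy', cv=3, n_jobs=-1)
search.fit(X, y)
print(search.best_params_, search.best_score_)<jupyter_output><empty_output><jupyter_text>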
## Conclusiones<jupyter_code> #print('Modelo con countvectorizer sin eliminar stop words \n',acc_vect_constpw, '\n ') #print('Modelo con countvectorizer eliminando stop words \n',acc_vect_sinstpw, '\n ') print('Modelo con countvectorizer sin eliminar stop words, iterando enegramas \n',acc2, '\n ') print('Modelo con countvectorizer eliminando stop words, iterando enegramas \n', acc1, '\n ') #print('Modelo con TfidfVectorize sin eliminar stop words \n', acc_tfidf_constw, '\n ') #print('Modelo con TfidfVectorize sin eliminar stop words \n', acc_tfidf_sinstw, '\n ') print('Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas \n', acc3, '\n ') print('Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas \n', acc4, '\n ') <jupyter_output>Modelo con countvectorizer sin eliminar stop words, iterando enegramas [[1, 0.7666211293260473], [2, 0.7331511839708561], [3, 0.7122040072859745], [4, 0.6933060109289617], [5, 0.6810109289617486]] Modelo con countvectorizer eliminando stop words, iterando enegramas [[1, 0.7497723132969034], [2, 0.7106102003642987], [3, 0.6867030965391621], [4, 0.6755464480874317], [5, 0.6614298724954463]] Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas [[1, 0.7556921675774135], [2, 0.7290528233151184], [3, 0.7135701275045537], [4, 0.6996812386156649], [5, 0.6903460837887068]] Modelo con TfidfVectorize sin eliminar stop words, iterando enegramas [[1, 0.7486338797814208], [2, 0.7090163934426229], [3, 0.6898907103825137], [4, 0.6687158469945356], [5, 0.660063752276867]] <jupyter_text>**El modelo de mejor desempeño para clasificar el sentimiento del tweet para las aerolíneas se da con la función countvectorizer con un accuracy del 76%, en este caso se debe tener en cuenta los stopwords y no se deben incluir enegramas mayores a 2.** Asímismo, se recomienda hacer una calibración de los parámetros del random forest, para obtener un mejor resultado de predicción. <jupyter_code><jupyter_output><empty_output>
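<jupyter_text>The closing remark above recommends calibrating the Random Forest parameters. A minimal sketch of that calibration with `RandomizedSearchCV` (assuming the document-term matrix split `X_train`/`y_train` from the first model is still in memory; the search ranges are illustrative, not tuned values):<jupyter_code>from scipy.stats import randint
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV

# Illustrative search space; the point is the mechanism, not these exact ranges
param_dist = {
    'n_estimators': randint(100, 500),
    'max_depth': randint(10, 200),
    'min_samples_leaf': randint(1, 10),
    'max_features': ['sqrt', 'log2'],
    'criterion': ['gini', 'entropy'],
}

rf = RandomForestClassifier(n_jobs=-1, random_state=123)
search = RandomizedSearchCV(rf, param_dist, n_iter=20, scoring='accuracy',
                            cv=3, random_state=123, n_jobs=-1)
search.fit(X_train, y_train)
print(search.best_params_)
print('CV accuracy:', search.best_score_)<jupyter_output><empty_output>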
no_license
/E12_SentimentPrediction VF.ipynb
angiepa2130/P3-MAAD-grupo-5
19
<jupyter_start><jupyter_text>## Dependencies<jupyter_code>import random, os, warnings, math import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error import tensorflow as tf import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras import optimizers, losses, metrics, Model from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler from transformers import TFAutoModelForSequenceClassification, TFAutoModel, AutoTokenizer def seed_everything(seed=0): random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) os.environ['TF_DETERMINISTIC_OPS'] = '1' seed = 0 seed_everything(seed) sns.set(style='whitegrid') warnings.filterwarnings('ignore') pd.set_option('display.max_colwidth', 150)<jupyter_output><empty_output><jupyter_text>### Hardware configuration<jupyter_code># TPU or GPU detection # Detect hardware, return appropriate distribution strategy try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print(f'Running on TPU {tpu.master()}') except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() AUTO = tf.data.experimental.AUTOTUNE REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}')<jupyter_output>Running on TPU grpc://10.0.0.2:8470 REPLICAS: 8 <jupyter_text># Load data<jupyter_code>train_filepath = '/kaggle/input/commonlitreadabilityprize/train.csv' train = pd.read_csv(train_filepath) print(f'Train samples: {len(train)}') display(train.head()) # removing unused columns train.drop(['url_legal', 'license'], axis=1, inplace=True)<jupyter_output>Train samples: 2834 <jupyter_text># Model parameters<jupyter_code>BATCH_SIZE = 8 * REPLICAS LEARNING_RATE = 1e-5 * REPLICAS EPOCHS = 35 ES_PATIENCE = 10 PATIENCE = 2 N_FOLDS = 5 N_USED_FOLDS = 1 SEQ_LEN = 256 BASE_MODEL = '/kaggle/input/huggingface-roberta/roberta-base/'<jupyter_output><empty_output><jupyter_text>## Auxiliary functions<jupyter_code># Datasets utility functions def custom_standardization(text): text = text.lower() # if encoder is uncased text = text.strip() return text def sample_target(features, target): mean, stddev = target sampled_target = tf.random.normal([], mean=tf.cast(mean, dtype=tf.float32), stddev=tf.cast(stddev, dtype=tf.float32), dtype=tf.float32) return (features, sampled_target) def get_dataset(pandas_df, tokenizer, labeled=True, ordered=False, repeated=False, is_sampled=False, batch_size=32, seq_len=128): """ Return a Tensorflow dataset ready for training or inference. 
""" text = [custom_standardization(text) for text in pandas_df['excerpt']] # Tokenize inputs tokenized_inputs = tokenizer(text, max_length=seq_len, truncation=True, padding='max_length', return_tensors='tf') if labeled: dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': tokenized_inputs['input_ids'], 'attention_mask': tokenized_inputs['attention_mask']}, (pandas_df['target'], pandas_df['standard_error']))) if is_sampled: dataset = dataset.map(sample_target, num_parallel_calls=tf.data.AUTOTUNE) else: dataset = tf.data.Dataset.from_tensor_slices({'input_ids': tokenized_inputs['input_ids'], 'attention_mask': tokenized_inputs['attention_mask']}) if repeated: dataset = dataset.repeat() if not ordered: dataset = dataset.shuffle(1024) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(tf.data.AUTOTUNE) return dataset def plot_metrics(history): metric_list = list(history.keys()) size = len(metric_list)//2 fig, axes = plt.subplots(size, 1, sharex='col', figsize=(20, size * 5)) axes = axes.flatten() for index in range(len(metric_list)//2): metric_name = metric_list[index] val_metric_name = metric_list[index+size] axes[index].plot(history[metric_name], label='Train %s' % metric_name) axes[index].plot(history[val_metric_name], label='Validation %s' % metric_name) axes[index].legend(loc='best', fontsize=16) axes[index].set_title(metric_name) plt.xlabel('Epochs', fontsize=16) sns.despine() plt.show()<jupyter_output><empty_output><jupyter_text># Model<jupyter_code>def model_fn(encoder, seq_len=256): input_ids = L.Input(shape=(seq_len,), dtype=tf.int32, name='input_ids') input_attention_mask = L.Input(shape=(seq_len,), dtype=tf.int32, name='attention_mask') outputs = encoder({'input_ids': input_ids, 'attention_mask': input_attention_mask}) last_hidden_state = outputs['last_hidden_state'] x = L.GlobalAveragePooling1D()(last_hidden_state) output = L.Dense(1, name='output')(x) model = Model(inputs=[input_ids, input_attention_mask], outputs=output) optimizer = optimizers.Adam(lr=LEARNING_RATE) model.compile(optimizer=optimizer, loss=losses.MeanSquaredError(), metrics=[metrics.RootMeanSquaredError()]) return model with strategy.scope(): encoder = TFAutoModel.from_pretrained(BASE_MODEL) model = model_fn(encoder, SEQ_LEN) model.summary()<jupyter_output>Some layers from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/ were not used when initializing TFRobertaModel: ['lm_head'] - This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). All the layers of TFRobertaModel were initialized from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/. If your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training. 
<jupyter_text># Training<jupyter_code>tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL) skf = KFold(n_splits=N_FOLDS, shuffle=True, random_state=seed) oof_pred = []; oof_labels = []; history_list = [] for fold,(idxT, idxV) in enumerate(skf.split(train)): if fold >= N_USED_FOLDS: break if tpu: tf.tpu.experimental.initialize_tpu_system(tpu) print(f'\nFOLD: {fold+1}') print(f'TRAIN: {len(idxT)} VALID: {len(idxV)}') # Model K.clear_session() with strategy.scope(): encoder = TFAutoModel.from_pretrained(BASE_MODEL) model = model_fn(encoder, SEQ_LEN) model_path = f'model_{fold}.h5' es = EarlyStopping(monitor='val_root_mean_squared_error', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1) checkpoint = ModelCheckpoint(model_path, monitor='val_root_mean_squared_error', mode='min', save_best_only=True, save_weights_only=True) # Train history = model.fit(x=get_dataset(train.loc[idxT], tokenizer, repeated=True, is_sampled=True, batch_size=BATCH_SIZE, seq_len=SEQ_LEN), validation_data=get_dataset(train.loc[idxV], tokenizer, ordered=True, batch_size=BATCH_SIZE, seq_len=SEQ_LEN), steps_per_epoch=50, callbacks=[es, checkpoint], epochs=EPOCHS, verbose=2).history history_list.append(history) # Save last model weights model.load_weights(model_path) # Results print(f"#### FOLD {fold+1} OOF RMSE = {np.min(history['val_root_mean_squared_error']):.4f}") # OOF predictions valid_ds = get_dataset(train.loc[idxV], tokenizer, ordered=True, batch_size=BATCH_SIZE, seq_len=SEQ_LEN) oof_labels.append([target[0].numpy() for sample, target in iter(valid_ds.unbatch())]) x_oof = valid_ds.map(lambda sample, target: sample) oof_pred.append(model.predict(x_oof))<jupyter_output> FOLD: 1 TRAIN: 2267 VALID: 567 <jupyter_text>## Model loss and metrics graph<jupyter_code>for fold, history in enumerate(history_list): print(f'\nFOLD: {fold+1}') plot_metrics(history)<jupyter_output> FOLD: 1 <jupyter_text># Model evaluation We are evaluating the model on the `OOF` predictions, it stands for `Out Of Fold`, since we are training using `K-Fold` our model will see all the data, and the correct way to evaluate each fold is by looking at the predictions that are not from that fold. ## OOF metrics<jupyter_code>y_true = np.concatenate(oof_labels) y_preds = np.concatenate(oof_pred) for fold, history in enumerate(history_list): print(f"FOLD {fold+1} RMSE: {np.min(history['val_root_mean_squared_error']):.4f}") print(f'OOF RMSE: {mean_squared_error(y_true, y_preds, squared=False):.4f}')<jupyter_output>FOLD 1 RMSE: 0.5370 OOF RMSE: 0.5370 <jupyter_text>### **Error analysis**, label x prediction distribution Here we can compare the distribution from the labels and the predicted values, in a perfect scenario they should align.<jupyter_code>preds_df = pd.DataFrame({'Label': y_true, 'Prediction': y_preds[:,0]}) fig, ax = plt.subplots(1, 1, figsize=(20, 6)) sns.distplot(preds_df['Label'], ax=ax, label='Label') sns.distplot(preds_df['Prediction'], ax=ax, label='Prediction') ax.legend() plt.show() sns.jointplot(data=preds_df, x='Label', y='Prediction', kind='reg', height=10) plt.show()<jupyter_output><empty_output>
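<jupyter_text>The markdown above explains why out-of-fold predictions are the right way to score a K-Fold setup. A small self-contained sketch of the same bookkeeping with scikit-learn (toy data, with a linear model standing in for the RoBERTa regressor) may make the mechanics clearer:<jupyter_code>import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = X @ rng.normal(size=5) + rng.normal(scale=0.1, size=200)

oof_pred = np.zeros_like(y)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for train_idx, valid_idx in kf.split(X):
    model = Ridge().fit(X[train_idx], y[train_idx])
    # Each sample is predicted only by the model that never saw it during training
    oof_pred[valid_idx] = model.predict(X[valid_idx])

print('OOF RMSE:', mean_squared_error(y, oof_pred, squared=False))<jupyter_output><empty_output>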
permissive
/Model backlog/Train/7-commonlit-roberta-base-seq-256-sampling.ipynb
dimitreOliveira/CommonLit-Readability-Prize
10
<jupyter_start><jupyter_text>diffs, co can z scale, hl can smartscale sigma2<jupyter_code>for label in ['diff_co','diff_hl']: my_df[label].plot.hist( bins=30, title=label ) plt.show() np.log10( my_df['diff_hl'] ).plot.hist( bins=30, title='log( diff_hl )' ) plt.show() foo, vals = rv.smart_scale( my_df, 'diff_co', n_sigma=8.0, show_final=True, curve_boost=4e4, return_coeff=True ) print foo.describe() foo = rv.scale_column( my_df, 'diff_co' ) foo.plot.hist( bins=91 ) plt.xlim(-5,5) plt.show() scale_dict['diff_co_mean'] = my_df['diff_co'].mean() scale_dict['diff_co_std' ] = my_df['diff_co'].std() my_df['log_diff_hl'] = np.log10( my_df['diff_hl'] ) foo, vals = rv.smart_scale( my_df, 'log_diff_hl', n_sigma=2.0, show_final=True, curve_boost=4e4, return_coeff=True ) scale_dict['log_diff_hl_mean'] = vals[0] scale_dict['log_diff_hl_std' ] = vals[1] print foo.describe()<jupyter_output><empty_output><jupyter_text>Momentum, z scaling does well<jupyter_code>for i in mom_nums: label = 'momentum_'+str(i) my_df[label].plot.hist( bins=30, title=label ) plt.show() for i in mom_nums: label = 'momentum_'+str(i) foo = rv.scale_column(my_df,label)#smart_scale( my_df, label, n_sigma=10.0, show_final=True, curve_boost=1e4, return_coeff=True, ) #vals[0] = foo.mean() #vals[1] = foo.std() #scale_dict[label+'_mean'] = my_df[label].mean() #scale_dict[label+'_std' ] = my_df[label].std() foo.plot.hist( bins=91 ) plt.xlim(-5,5) plt.show() print foo.describe() for i in mom_nums: label = 'momentum_'+str(i) ( (my_df[label]-0.0) / 0.1 ).plot.hist( bins=71, title=label ) plt.xlim(-5,5) plt.show() scale_dict['momentum_mean'] = 0.0 scale_dict['momentum_std' ] = 0.1<jupyter_output><empty_output><jupyter_text>RSI, do a modified zscaling. Center of distribution is clearly 0.5 by definition, using std of 0.2 for all will put in same window, and sort of resemble a normal distribution<jupyter_code>for i in rsi_nums: label = 'rsi_'+str(i) my_df[label].plot.hist( bins=30, title=label ) plt.show() for i in rsi_nums: label = 'rsi_'+str(i) ( (my_df[label] - 0.5) / 0.2 ).plot.hist( bins=30, title=label ) plt.show() for i in rsi_nums: label = 'rsi_'+str(i) foo, vals = rv.smart_scale( my_df, label, n_sigma=2.0, show_final=True, curve_boost=1e4, return_coeff=True ) #scale_dict[label+'_mean'] = vals[0] #scale_dict[label+'_std' ] = vals[1] print foo.describe() scale_dict['rsi_mean'] = 0.5 scale_dict['rsi_std' ] = 0.2<jupyter_output><empty_output><jupyter_text>Bollinger bands, already centered at 0, use std of 0.65 for proper spread<jupyter_code>for i in band_nums: label = 'bollinger_'+str(i) my_df[label].plot.hist( bins=30, title=label ) plt.show() for i in band_nums: label = 'bollinger_'+str(i) #foo, vals = rv.smart_scale( my_df, label, n_sigma=2.0, show_final=True, curve_boost=1e4, return_coeff=True ) #scale_dict[label+'_mean'] = vals[0] #scale_dict[label+'_std' ] = vals[1] #foo = rv.scale_column(my_df,label)#smart_scale( my_df, label, n_sigma=10.0, show_final=True, curve_boost=1e4, return_coeff=True, ) #vals[0] = foo.mean() #vals[1] = foo.std() #scale_dict[label+'_mean'] = my_df[label].mean() #scale_dict[label+'_std' ] = my_df[label].std() foo = ( (my_df[label] - 0.0 ) / 0.65 ) foo.plot.hist( bins=91 ) plt.xlim(-5,5) plt.show() print my_df[label].mean() print my_df[label].std() print foo.describe() scale_dict['band_mean'] = 0.0 scale_dict['band_std' ] = 0.65 my_df.columns.values print my_df['frac_year_1'].describe() print my_df['frac_year_2'].describe() my_df['close'].plot.hist( bins=30 ) print scale_dict import pickle with 
open('quotes/scaling_dict.pkl','wb') as handle: pickle.dump( scale_dict, handle, protocol=pickle.HIGHEST_PROTOCOL )<jupyter_output><empty_output>
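<jupyter_text>The notebook ends by pickling `scale_dict`. For completeness, a sketch of how those saved statistics might be applied later on the trading side (assuming the same key naming used above, e.g. `'rsi_mean'`/`'rsi_std'`, and a dataframe with matching columns; the `z_scale` helper and the column names in the usage comments are hypothetical, not part of the original code):<jupyter_code>import pickle

with open('quotes/scaling_dict.pkl', 'rb') as handle:
    scale_dict = pickle.load(handle)

def z_scale(df, column, prefix):
    # (x - mean) / std, using the statistics fixed when the dictionary was built
    return (df[column] - scale_dict[prefix + '_mean']) / scale_dict[prefix + '_std']

# Illustrative usage on a dataframe shaped like my_df above:
# my_df['rsi_14_scaled'] = z_scale(my_df, 'rsi_14', 'rsi')
# my_df['momentum_5_scaled'] = z_scale(my_df, 'momentum_5', 'momentum')<jupyter_output><empty_output>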
no_license
/Scaling.ipynb
markertsean/python_trader
4
<jupyter_start><jupyter_text><jupyter_code># install packages for Colab # install.packages(c("rsample", "caret", "vip", "h2o", "AmesHousing", "viridis", "broom")) # Helper packages library(dplyr) # for data manipulation library(ggplot2) # for awesome graphics ggplot2::theme_set(ggplot2::theme_light()) library(viridis) library(broom) # Modeling process packages library(rsample) # for resampling library(caret) # for resampling and model training library(h2o) # for resampling and model training h2o.no_progress() # turn off h2o progress bars h2o.init() # launch h2o # Model interpretability packages library(vip) # variable importance # Ames housing data library(AmesHousing) ames <- AmesHousing::make_ames() ames.h2o <- as.h2o(ames)<jupyter_output> Attaching package: ‘dplyr’ The following objects are masked from ‘package:stats’: filter, lag The following objects are masked from ‘package:base’: intersect, setdiff, setequal, union Loading required package: viridisLite Loading required package: lattice ---------------------------------------------------------------------- Your next step is to start H2O: > h2o.init() For H2O package documentation, ask for help: > ??h2o After starting H2O, you can use the Web UI at http://localhost:54321 For more information visit https://docs.h2o.ai ---------------------------------------------------------------------- Attaching package: ‘h2o’ The following objects are masked from ‘package:stats’: cor, sd, var The following objects are masked from ‘package:base’: &&, %*%, %in%, ||, apply, as.factor, as.numeric, colnames, colnames<-, ifelse, is.character, is.factor, is.numeric, log, log10, log1p, log2, round, signif, trunc <jupyter_text># Workflow using `rsample`## Simple linear regression### Estimation<jupyter_code># stratified sampling set.seed(123) split <- initial_split(ames, prop = 0.7, strata = "Sale_Price") ames_train <- training(split) ames_test <- testing(split) # linear model with single predictor model1 <- lm(Sale_Price ~ Gr_Liv_Area, data = ames_train) # Fitted regression line (full training data) p1 <- model1 %>% broom::augment() %>% ggplot(aes(Gr_Liv_Area, Sale_Price)) + geom_point(size = 1, alpha = 0.3) + geom_smooth(se = FALSE, method = "lm") + scale_y_continuous(labels = scales::dollar) + ggtitle("Fitted regression line") # Fitted regression line (restricted range) p2 <- model1 %>% broom::augment() %>% ggplot(aes(Gr_Liv_Area, Sale_Price)) + geom_segment(aes(x = Gr_Liv_Area, y = Sale_Price, xend = Gr_Liv_Area, yend = .fitted), alpha = 0.3) + geom_point(size = 1, alpha = 0.3) + geom_smooth(se = FALSE, method = "lm") + scale_y_continuous(labels = scales::dollar) + ggtitle("Fitted regression line (with residuals)") # Side-by-side plots grid.arrange(p1, p2, nrow = 1) summary(model1) sigma(model1) #RMSE sigma(model1)^2 #MSE<jupyter_output><empty_output><jupyter_text>### Inference<jupyter_code>confint(model1, level = 0.95)<jupyter_output><empty_output><jupyter_text>## Multiple linear regression<jupyter_code>(model2 <- lm(Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train)) (model2 <- update(model1, . ~ . 
+ Year_Built)) lm(Sale_Price ~ Gr_Liv_Area + Year_Built + Gr_Liv_Area:Year_Built, data = ames_train) # Fitted models fit1 <- lm(Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train) fit2 <- lm(Sale_Price ~ Gr_Liv_Area * Year_Built, data = ames_train) # Regression plane data plot_grid <- expand.grid( Gr_Liv_Area = seq(from = min(ames_train$Gr_Liv_Area), to = max(ames_train$Gr_Liv_Area), length = 100), Year_Built = seq(from = min(ames_train$Year_Built), to = max(ames_train$Year_Built), length = 100) ) plot_grid$y1 <- predict(fit1, newdata = plot_grid) plot_grid$y2 <- predict(fit2, newdata = plot_grid) # Level plots p1 <- ggplot(plot_grid, aes(x = Gr_Liv_Area, y = Year_Built, z = y1, fill = y1)) + geom_tile() + geom_contour(color = "white") + viridis::scale_fill_viridis(name = "Predicted\nvalue", option = "inferno") + theme_bw() + ggtitle("Main effects only") p2 <- ggplot(plot_grid, aes(x = Gr_Liv_Area, y = Year_Built, z = y2, fill = y1)) + geom_tile() + geom_contour(color = "white") + viridis::scale_fill_viridis(name = "Predicted\nvalue", option = "inferno") + theme_bw() + ggtitle("Main effects with two-way interaction") gridExtra::grid.arrange(p1, p2, nrow = 1) install.packages("broom") library(broom) # include all possible main effects model3 <- lm(Sale_Price ~ ., data = ames_train) # print estimated coefficients in a tidy data frame broom::tidy(model3) <jupyter_output><empty_output><jupyter_text>## Assessing model accuracy<jupyter_code># Train model using 10-fold cross-validation set.seed(123) # for reproducibility (cv_model1 <- train( form = Sale_Price ~ Gr_Liv_Area, data = ames_train, method = "lm", trControl = trainControl(method = "cv", number = 10) )) # model 2 CV set.seed(123) cv_model2 <- train( Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train, method = "lm", trControl = trainControl(method = "cv", number = 10) ) # model 3 CV set.seed(123) cv_model3 <- train( Sale_Price ~ ., data = ames_train, method = "lm", trControl = trainControl(method = "cv", number = 10) ) # Extract out of sample performance measures summary(resamples(list( model1 = cv_model1, model2 = cv_model2, model3 = cv_model3 )))<jupyter_output>Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFit, newdata): “prediction from a rank-deficient fit may be misleading” Warning message in predict.lm(modelFi[...]<jupyter_text>## Model concerns<jupyter_code>p1 <- ggplot(ames_train, aes(Year_Built, Sale_Price)) + geom_point(size = 1, alpha = .4) + geom_smooth(se = FALSE) + scale_y_continuous("Sale price", labels = scales::dollar) + xlab("Year built") + ggtitle(paste("Non-transformed variables with a\n", "non-linear relationship.")) p2 <- ggplot(ames_train, aes(Year_Built, Sale_Price)) + 
geom_point(size = 1, alpha = .4) + geom_smooth(method = "lm", se = FALSE) + scale_y_log10("Sale price", labels = scales::dollar, breaks = seq(0, 400000, by = 100000)) + xlab("Year built") + ggtitle(paste("Transforming variables can provide a\n", "near-linear relationship.")) gridExtra::grid.arrange(p1, p2, nrow = 1) df1 <- broom::augment(cv_model1$finalModel, data = ames_train) p1 <- ggplot(df1, aes(.fitted, .resid)) + geom_point(size = 1, alpha = .4) + xlab("Predicted values") + ylab("Residuals") + ggtitle("Model 1", subtitle = "Sale_Price ~ Gr_Liv_Area") df2 <- broom::augment(cv_model3$finalModel, data = ames_train) p2 <- ggplot(df2, aes(.fitted, .resid)) + geom_point(size = 1, alpha = .4) + xlab("Predicted values") + ylab("Residuals") + ggtitle("Model 3", subtitle = "Sale_Price ~ .") gridExtra::grid.arrange(p1, p2, nrow = 1)<jupyter_output><empty_output>
permissive
/ames-housing/homlr-chapter-4-linear-regression.ipynb
YoYo1971/discover-projects
6
<jupyter_start><jupyter_text># Regression Week 4: Ridge Regression (interpretation)In this notebook, we will run ridge regression multiple times with different L2 penalties to see which one produces the best fit. We will revisit the example of polynomial regression as a means to see the effect of L2 regularization. In particular, we will: * Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression * Use matplotlib to visualize polynomial regressions * Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression, this time with L2 penalty * Use matplotlib to visualize polynomial regressions under L2 regularization * Choose best L2 penalty using cross-validation. * Assess the final fit using test data. We will continue to use the House data from previous notebooks. (In the next programming assignment for this module, you will implement your own ridge regression learning algorithm using gradient descent.)# Fire up graphlab create<jupyter_code>import graphlab<jupyter_output>/usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning. SNIMissingWarning /usr/local/lib/python2.7/dist-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning. InsecurePlatformWarning [WARNING] Unable to write current GraphLab Create license to /home/kuntal/.graphlab/config. Ensure that this user account has write permiss[...]<jupyter_text># Polynomial regression, revisitedWe build on the material from Week 3, where we wrote the function to produce an SFrame with columns containing the powers of a given input. Copy and paste the function `polynomial_sframe` from Week 3:<jupyter_code>def polynomial_sframe(feature, degree): # assume that degree >= 1 # initialize the SFrame: poly_sframe = graphlab.SFrame() # and set poly_sframe['power_1'] equal to the passed feature poly_sframe['power_1'] = feature # first check if degree > 1 if degree > 1: # then loop over the remaining degrees: # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree for power in range(2, degree+1): # first we'll give the column a name: name = 'power_' + str(power) # then assign poly_sframe[name] to the appropriate power of feature poly_sframe[name] = feature**power return poly_sframe <jupyter_output><empty_output><jupyter_text>Let's use matplotlib to visualize what a polynomial regression looks like on the house data.<jupyter_code>import matplotlib.pyplot as plt %matplotlib inline sales = graphlab.SFrame('kc_house_data.gl/')<jupyter_output><empty_output><jupyter_text>As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. 
For houses with identical square footage, we break the tie by their prices.<jupyter_code>sales = sales.sort(['sqft_living','price'])<jupyter_output><empty_output><jupyter_text>Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using `polynomial_sframe()` and fit a model with these features. When fitting the model, use an L2 penalty of `1e-5`:<jupyter_code>l2_small_penalty = 1e-5<jupyter_output><empty_output><jupyter_text>Note: When we have so many features and so few data points, the solution can become highly numerically unstable, which can sometimes lead to strange unpredictable results. Thus, rather than using no regularization, we will introduce a tiny amount of regularization (`l2_penalty=1e-5`) to make the solution numerically stable. (In lecture, we discussed the fact that regularization can also help with numerical stability, and here we are seeing a practical example.) With the L2 penalty specified above, fit the model and print out the learned weights. Hint: make sure to add 'price' column to the new SFrame before calling `graphlab.linear_regression.create()`. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set=None` in this call.<jupyter_code>poly1_data = polynomial_sframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = sales['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, validation_set = None, l2_penalty=l2_small_penalty) model1.get("coefficients")<jupyter_output><empty_output><jupyter_text>***QUIZ QUESTION: What's the learned value for the coefficient of feature `power_1`?***# Observe overfittingRecall from Week 3 that the polynomial fit of degree 15 changed wildly whenever the data changed. In particular, when we split the sales data into four subsets and fit the model of degree 15, the result came out to be very different for each subset. The model had a *high variance*. We will see in a moment that ridge regression reduces such variance. But first, we must reproduce the experiment we did in Week 3.First, split the data into split the sales data into four subsets of roughly equal size and call them `set_1`, `set_2`, `set_3`, and `set_4`. Use `.random_split` function and make sure you set `seed=0`. <jupyter_code>(semi_split1, semi_split2) = sales.random_split(.5,seed=0) (set_1, set_2) = semi_split1.random_split(0.5, seed=0) (set_3, set_4) = semi_split2.random_split(0.5, seed=0)<jupyter_output><empty_output><jupyter_text>Next, fit a 15th degree polynomial on `set_1`, `set_2`, `set_3`, and `set_4`, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model. Hint: When calling `graphlab.linear_regression.create()`, use the same L2 penalty as before (i.e. `l2_small_penalty`). 
Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.<jupyter_code>poly1_data = polynomial_sframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_1['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty) model1.get("coefficients") poly1_data = polynomial_sframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_2['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty) model1.get("coefficients") poly1_data = polynomial_sframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_3['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty) model1.get("coefficients") poly1_data = polynomial_sframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_4['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=l2_small_penalty) model1.get("coefficients")<jupyter_output><empty_output><jupyter_text>The four curves should differ from one another a lot, as should the coefficients you learned. ***QUIZ QUESTION: For the models learned in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)# Ridge regression comes to rescueGenerally, whenever we see weights change so much in response to change in data, we believe the variance of our estimate to be large. Ridge regression aims to address this issue by penalizing "large" weights. (Weights of `model15` looked quite small, but they are not that small because 'sqft_living' input is in the order of thousands.) With the argument `l2_penalty=1e5`, fit a 15th-order polynomial model on `set_1`, `set_2`, `set_3`, and `set_4`. Other than the change in the `l2_penalty` parameter, the code should be the same as the experiment above. 
Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.<jupyter_code>poly1_data = polynomial_sframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_1['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5) model1.get("coefficients") poly1_data = polynomial_sframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_2['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5) model1.get("coefficients") poly1_data = polynomial_sframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_3['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5) model1.get("coefficients") poly1_data = polynomial_sframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() poly1_data['price'] = set_4['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1e5) model1.get("coefficients")<jupyter_output><empty_output><jupyter_text>These curves should vary a lot less, now that you applied a high degree of regularization. ***QUIZ QUESTION: For the models learned with the high level of regularization in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.)# Selecting an L2 penalty via cross-validationJust like the polynomial degree, the L2 penalty is a "magic" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. **Cross-validation** seeks to overcome this issue by using all of the training set in a smart way. We will implement a kind of cross-validation called **k-fold cross-validation**. The method gets its name because it involves dividing the training set into k segments of roughtly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows: Set aside segment 0 as the validation set, and fit a model on rest of data, and evalutate it on this validation set Set aside segment 1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set ... Set aside segment k-1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set After this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data. 
To estimate the generalization error well, it is crucial to shuffle the training data before dividing them into segments. GraphLab Create has a utility function for shuffling a given SFrame. We reserve 10% of the data as the test set and shuffle the remainder. (Make sure to use `seed=1` to get consistent answer.)<jupyter_code>(train_valid, test) = sales.random_split(.9, seed=1) train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)<jupyter_output><empty_output><jupyter_text>Once the data is shuffled, we divide it into equal segments. Each segment should receive `n/k` elements, where `n` is the number of observations in the training set and `k` is the number of segments. Since the segment 0 starts at index 0 and contains `n/k` elements, it ends at index `(n/k)-1`. The segment 1 starts where the segment 0 left off, at index `(n/k)`. With `n/k` elements, the segment 1 ends at index `(n*2/k)-1`. Continuing in this fashion, we deduce that the segment `i` starts at index `(n*i/k)` and ends at `(n*(i+1)/k)-1`.With this pattern in mind, we write a short loop that prints the starting and ending indices of each segment, just to make sure you are getting the splits right.<jupyter_code>n = len(train_valid_shuffled) k = 10 # 10-fold cross-validation for i in xrange(k): start = (n*i)/k end = (n*(i+1))/k-1 print i, (start, end)<jupyter_output>0 (0, 1938) 1 (1939, 3878) 2 (3879, 5817) 3 (5818, 7757) 4 (7758, 9697) 5 (9698, 11636) 6 (11637, 13576) 7 (13577, 15515) 8 (15516, 17455) 9 (17456, 19395) <jupyter_text>Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of `train_valid_shuffled`. Notice that the first index (0) is included in the slice but the last index (10) is omitted.<jupyter_code>train_valid_shuffled[0:10] # rows 0 to 9<jupyter_output><empty_output><jupyter_text>Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the `train_valid_shuffled` dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above. Extract the fourth segment (segment 3) and assign it to a variable called `validation4`.<jupyter_code>validation4=train_valid_shuffled[5818:7758]<jupyter_output><empty_output><jupyter_text>To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.<jupyter_code>print int(round(validation4['price'].mean(), 0))<jupyter_output>536234 <jupyter_text>After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0:start) and (end+1:n) of the data and paste them together. SFrame has `append()` method that pastes together two disjoint sets of rows originating from a common dataset. 
For instance, the following cell pastes together the first and last two rows of the `train_valid_shuffled` dataframe.<jupyter_code>n = len(train_valid_shuffled) first_two = train_valid_shuffled[0:2] last_two = train_valid_shuffled[n-2:n] print first_two.append(last_two)<jupyter_output>+------------+---------------------------+-----------+----------+-----------+ | id | date | price | bedrooms | bathrooms | +------------+---------------------------+-----------+----------+-----------+ | 2780400035 | 2014-05-05 00:00:00+00:00 | 665000.0 | 4.0 | 2.5 | | 1703050500 | 2015-03-21 00:00:00+00:00 | 645000.0 | 3.0 | 2.5 | | 4139480190 | 2014-09-16 00:00:00+00:00 | 1153000.0 | 3.0 | 3.25 | | 7237300290 | 2015-03-26 00:00:00+00:00 | 338000.0 | 5.0 | 2.5 | +------------+---------------------------+-----------+----------+-----------+ +-------------+----------+--------+------------+------+-----------+-------+------------+ | sqft_living | sqft_lot | floors | waterfront | view | condition | grade | sqft_above | +-------------+----------+--------+------------+------+-----------+-------+------------+ | 2800.0 | 5900 | 1 | 0 | 0 | 3 | 8 | 1660 | | 2490.0 | 59[...]<jupyter_text>Extract the remainder of the data after *excluding* fourth segment (segment 3) and assign the subset to `train4`.<jupyter_code>train4=train_valid_shuffled[0:5818].append(train_valid_shuffled[7758:19396])<jupyter_output><empty_output><jupyter_text>To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.<jupyter_code>print int(round(train4['price'].mean(), 0))<jupyter_output>539450 <jupyter_text>Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) `k`, (ii) `l2_penalty`, (iii) dataframe, (iv) name of output column (e.g. `price`) and (v) list of feature names. The function returns the average validation error using k segments as validation sets. * For each i in [0, 1, ..., k-1]: * Compute starting and ending indices of segment i and call 'start' and 'end' * Form validation set by taking a slice (start:end+1) from the data. * Form training set by appending slice (end+1:n) to the end of slice (0:start). * Train a linear model using training set just formed, with a given l2_penalty * Compute validation error using validation set just formed<jupyter_code>def get_RSS(prediction, output): residual = output - prediction # square the residuals and add them up RS = residual*residual RSS = RS.sum() return(RSS) def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list): n=len(data) RSS = 0 for i in range(0,k-1): start=(n*i)/k end=(n*(i+1))/k-1 validation=data[start:end+1] training=data[0:start].append(data[end+1:n]) model=graphlab.linear_regression.create(training,target=output_name,features = features_list, l2_penalty=l2_penalty,validation_set=None,verbose = False) prediction=model.predict(validation) rss=get_RSS(prediction, validation[output_name]) RSS=RSS+rss value_err=RSS/k return value_err <jupyter_output><empty_output><jupyter_text>Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. 
Write a loop that does the following: * We will again be aiming to fit a 15th-order polynomial model using the `sqft_living` input * For `l2_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, you can use this Numpy function: `np.logspace(1, 7, num=13)`.) * Run 10-fold cross-validation with `l2_penalty` * Report which L2 penalty produced the lowest average validation error. Note: since the degree of the polynomial is now fixed to 15, to make things faster, you should generate polynomial features in advance and re-use them throughout the loop. Make sure to use `train_valid_shuffled` when generating polynomial features!<jupyter_code>import numpy as np poly_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15) my_features = poly_data.column_names() poly_data['price'] = train_valid_shuffled['price'] output_name='price' for l2_penalty in np.logspace(1, 7, num=13): Val_err = k_fold_cross_validation(10, l2_penalty, poly_data,output_name, my_features) print (l2_penalty,Val_err) <jupyter_output>(10.0, 476529406003612.0) (31.622776601683793, 273938216651549.56) (100.0, 147909441657361.3) (316.22776601683796, 109066503581727.88) (1000.0, 108042622266425.23) (3162.2776601683795, 110458360712246.33) (10000.0, 121981388561659.53) (31622.776601683792, 153321500552248.84) (100000.0, 205811306546692.06) (316227.76601683791, 225313137467116.16) (1000000.0, 228808655773395.7) (3162277.6601683795, 231991693540554.44) (10000000.0, 233720728134190.1) <jupyter_text>***QUIZ QUESTIONS: What is the best value for the L2 penalty according to 10-fold validation?***You may find it useful to plot the k-fold cross-validation errors you have obtained to better understand the behavior of the method. <jupyter_code># Plot the l2_penalty values in the x axis and the cross-validation error in the y axis. # Using plt.xscale('log') will make your plot more intuitive. <jupyter_output><empty_output><jupyter_text>Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of `l2_penalty`. This way, your final model will be trained on the entire dataset.<jupyter_code>poly1_data = polynomial_sframe(train_valid_shuffled['sqft_living'], 15) # use equivalent of `polynomial_sframe` my_features = poly1_data.column_names() output_name='price' poly1_data['price'] = train_valid_shuffled['price'] model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = my_features, verbose = False, validation_set = None, l2_penalty=1000) Val_err = k_fold_cross_validation(10, 1000, poly1_data,output_name, my_features) print Val_err<jupyter_output>1.08042622266e+14
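<jupyter_text>GraphLab Create has since been discontinued, so as a point of comparison, here is a rough scikit-learn sketch of the experiment described above: 15th-degree polynomial features on a single input, an L2 penalty swept over `np.logspace(1, 7, num=13)`, and 10-fold cross-validation. The `house_df` dataframe and the added scaling step are assumptions for the sketch, and sklearn's `alpha` is not numerically identical to GraphLab's `l2_penalty`, so the selected value would differ.<jupyter_code>import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score, KFold

# Assumes a dataframe `house_df` with 'sqft_living' and 'price' columns,
# analogous to the SFrame used above.
X = house_df[['sqft_living']].values
y = house_df['price'].values

cv = KFold(n_splits=10, shuffle=True, random_state=1)
for l2_penalty in np.logspace(1, 7, num=13):
    model = make_pipeline(PolynomialFeatures(degree=15, include_bias=False),
                          StandardScaler(),
                          Ridge(alpha=l2_penalty))
    scores = cross_val_score(model, X, y, cv=cv,
                             scoring='neg_root_mean_squared_error')
    print(l2_penalty, -scores.mean())<jupyter_output><empty_output>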
no_license
/Machine Learning-Specialization/Regression/Week-4/One/week-4-ridge-regression-assignment-1-blank.ipynb
Kuntal-G/MOOC-Courses
21
<jupyter_start><jupyter_text># Project: Investigate a Dataset (TMDB movie data) ## Table of Contents Introduction Data Wrangling Exploratory Data Analysis Conclusions ## Introduction For the Udacity Data Analyst Nano Degree project 2: investiagte a dataset, i have choosen the TMDB dataset out of the 5 dataset given. This data was originated from Kaggle, originally sourced from IMDB. In this project i will be cleaning and exploring the dataset, where questions below will be explored using some of the python tools learned from the class sessions, at the end i should be able to make sense of this data and answer these questions raised. __Questions:__ which year most of the movies were released?What are the High Budget Movies from year to year?what is the runtime, popularity and budget trends over the years?<jupyter_code>import numpy as np import pandas as pd from datetime import datetime import matplotlib.pyplot as plt % matplotlib inline<jupyter_output><empty_output><jupyter_text> ## Data Wrangling After observing the dataset that it is relatively clean and proposed questions for the analysis i will be keeping only relevent data, deleting data i dont need so that i can make the data easy and understandable. ### General Properties<jupyter_code># Load your data and print out a few lines. Perform operations to inspect data df = pd.read_csv('tmdb_movies.csv') df.head() <jupyter_output><empty_output><jupyter_text>An initial view of the data, its headings, rows and colums<jupyter_code>df.nunique() df.info() # a view of datatypes for str stored as object. print("I am imdb_id: ", type(df['imdb_id'][0])) print("I am original_title: ", type(df['original_title'][0])) print("I am cast: ", type(df['cast'][0])) print("I am homepage: ", type(df['homepage'][0])) print("I am director: ", type(df['director'][0])) print("I am tagline: ", type(df['tagline'][0])) print("I am keywords: ", type(df['keywords'][0])) print("I am overview: ", type(df['overview'][0])) print("I am genres: ", type(df['genres'][0])) print("I am production_companies: ", type(df['production_companies'][0])) print("I am release_date: ", type(df['release_date'][0]))<jupyter_output>I am imdb_id: <class 'str'> I am original_title: <class 'str'> I am cast: <class 'str'> I am homepage: <class 'str'> I am director: <class 'str'> I am tagline: <class 'str'> I am keywords: <class 'str'> I am overview: <class 'str'> I am genres: <class 'str'> I am production_companies: <class 'str'> I am release_date: <class 'str'> <jupyter_text>A confirmation of the datatypes for all columns described above as 'object'.<jupyter_code>null_check = df.loc[:,['id','budget','revenue','popularity','release_year','director','release_date']].sort_values(by =['budget'], ascending=True) null_check.head(9000) #identifying duplicated rows sum(df.duplicated()) df['is_duplicate_id'] = df.duplicated(['id']) df_dupe_id_filter = df[df['is_duplicate_id'] == True] df_dupe_id_filter.head() df_id_check_dupe = df[df['id'] == 42194] df_id_check_dupe.head() #drop duplicated ID row df.drop_duplicates(subset=['id'],inplace=True)<jupyter_output><empty_output><jupyter_text>> **Tip**: You should _not_ perform too many operations in each cell. Create cells freely to explore your data. One option that you can take with this project is to do a lot of explorations in an initial notebook. These don't have to be organized, but make sure you use enough comments to understand the purpose of each code cell. 
Then, after you're done with your analysis, create a duplicate notebook where you will trim the excess and organize your steps so that you have a flowing, cohesive report. > **Tip**: Make sure that you keep your reader informed on the steps that you are taking in your investigation. Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). Try to make it so that the reader can then understand what they will be seeing in the following cell(s). ### Data Cleaning (Replace this with more specific notes!)<jupyter_code># After discussing the structure of the data and any problems that need to be # cleaned, perform those cleaning steps in the second part of this section.<jupyter_output><empty_output><jupyter_text># Drop columns:In this section, I've decided to drop columns that are extraneous to questions i would explore:1. imdb_id: this appears to relate to the previous IMDB data. Assumption is that this was left in by Kaggle to map the IMDB and TMDB ids together2. budget and revenue: since budget_adj and revenue_adj have already been normalised to 2010 levels for more direct comparision, these two columns are no longer required3. homepage, tagline, overview and keywords: seem unnecessary to include this for the type of intended analysis 4. is_duplicate_title: is no longer necessary<jupyter_code>df.drop(['imdb_id', 'budget', 'revenue', 'homepage', 'tagline', 'overview', 'keywords', 'is_duplicate_id'], axis=1, inplace=True) df.head()<jupyter_output><empty_output><jupyter_text>#### updating datatype<jupyter_code>df['release_date'] = pd.to_datetime(df['release_date']) # check it's worked type(df['release_date'][0])<jupyter_output><empty_output><jupyter_text> ## Exploratory Data Analysis > **Tip**: Now that you've trimmed and cleaned your data, you're ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables. ### Question 1: which year most of the movies were released?<jupyter_code>df['release_year'].value_counts()[0:10] <jupyter_output><empty_output><jupyter_text>The year with most movies released from the data was year 2014 with 700 movies followed by year 2013, 2015 with 659 and 629 respectively.### Question 2: What are the High Budget Movies from year to year?<jupyter_code>def sort_by_budget(df): return df.sort_values(by = 'budget_adj',ascending = False)['original_title'].head(1) df.groupby('release_year').apply(sort_by_budget) df_movies = df.copy() df_movies.info() df_explore = df_movies.groupby('release_year').mean() df_explore.hist(figsize=(12, 16)); df_explore['runtime'].hist() plt.xlabel('Runtime') plt.title('Runtime Over the Years'); df_explore['runtime'].describe()<jupyter_output><empty_output><jupyter_text> 1. As seen in the plots and functions above, popular runtimes over the years are between 104 and 107 minutes. 2. The distribution is right skewed.<jupyter_code>df_explore['popularity'].hist() plt.xlabel('Popularity') plt.title('Popularity Over the Years'); df_explore['popularity'].describe()<jupyter_output><empty_output><jupyter_text> 1.As evident from histogram and quartile percentages, maximum ratings received fall in the 0.47 to 0.62 ranges. 2.The distribution is skewed to right. 
3. It is observed that no values lie in the range 0.73 to 0.89. This needs further scrutiny.<jupyter_code>df_explore['revenue'].hist() plt.xlabel('revenue') plt.title('Revenue Over the Years'); df_explore['revenue'].describe()<jupyter_output><empty_output><jupyter_text> 1. The distribution is skewed to the left. 2. Most movie revenues fall in the 3.257984e+07 to 4.293171e+07 range. ## Conclusions: At the end of my cleaning and exploration: 1. Year 2014 had the highest number of released movies. 2. In that same year, The Hobbit: The Battle of the Five Armies had the largest budget. 3. Popular runtimes over the years are between 104 and 107 minutes. 4. Most movie revenues fall in the 3.257984e+07 to 4.293171e+07 range. REFERENCES: 1. https://pandas.pydata.org/pandas-docs/stable/reference/frame.html 2. https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/ 3. https://developers.themoviedb.org/3/getting-started/popularity 4. https://prvnirupama.wordpress.com/2017/11/30/10-lessons-from-investigate-imdb-dataset/ 5. https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/<jupyter_code><jupyter_output><empty_output><jupyter_text> ## Submitting your Project > Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left). > Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button. > Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!<jupyter_code>from subprocess import call call(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb'])<jupyter_output><empty_output>
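<jupyter_text>As a side note on Question 2 above: the `groupby(...).apply(sort_by_budget)` pattern works, but pandas also has a more direct idiom for "row with the maximum value per group" using `idxmax`. A small sketch with the same columns used in this notebook (assuming `df` is the cleaned TMDB dataframe from the analysis):<jupyter_code># Index of the highest-budget row within each release year,
# then pull out the year, title and budget for those rows.
top_budget_idx = df.groupby('release_year')['budget_adj'].idxmax()
top_budget_per_year = df.loc[top_budget_idx, ['release_year', 'original_title', 'budget_adj']]
print(top_budget_per_year.head())<jupyter_output><empty_output>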
no_license
/Udacity Data Analyst Projects/project 2/Investigate_a_Dataset.ipynb
bakut/Udacity-Data-Analyst-Nano-Degree-projects
13
<jupyter_start><jupyter_text>Table of Contents <jupyter_code>%matplotlib inline %load_ext autoreload %autoreload 4 %autosave 120 from fastai.io import * from fastai.structured import * from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from pandas_summary import DataFrameSummary from IPython.display import display from sklearn import metrics import feather hist_trans = feather.read_dataframe('hist_trans') new_hist_trans = feather.read_dataframe('new_hist_trans') DataFrameSummary(new_hist_trans).summary().T temp1 = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_05042ebd55'])] temp2 = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_5c240d6e3c'])] temp = hist_trans.loc[hist_trans['card_id'].isin(['C_ID_05042ebd55', 'C_ID_5c240d6e3c'])] temp1['card_id'] = temp1.card_id.astype('category') temp1['merchant_id'] = temp1.merchant_id.astype('category') temp1.sort_values('purchase_date', inplace=True) la = temp1.groupby('card_id').rolling('30D', on='purchase_date')['purchase_amount'].sum().reset_index() la temp1.drop('month_rolling', axis=1, inplace=True) pd.merge(temp1, la, 'inner', on='purchase_date', suffixes=['_x', '_rolling_amount']) la = temp1.groupby(['card_id', 'month_diff'])['purchase_amount'].sum().reset_index() la la.groupby('card_id').agg(['mean', 'max', 'min']) def monthly_rolling(df, fe1, fe2): temp_df = df.sort_values('purchase_date') temp_df2 = temp_df.groupby('card_id').rolling('30D', on='purchase_date')[fe1].sum().reset_index() temp_df2 = temp_df2[['purchase_date', 'fe1']] return pd.merge temp1 temp1.sort_values('purchase_date').groupby('card_id').rolling('30D', on='purchase_date')['installments'].sum() temp1.sort_values('purchase_date').groupby('card_id')['purchase_date'].diff() temp1.sort_values('purchase_date').T def aggregate_per_month(history): grouped = history.groupby(['card_id', 'subsector_id'])['purchase_amount'] agg_func = { 'purchase_amount': ['count', 'sum', 'max', 'mean'] } intermediate_group = grouped.agg(agg_func) intermediate_group.columns = ['_'.join(col).strip() for col in intermediate_group.columns.values] intermediate_group.reset_index(inplace=True) final_group = intermediate_group.groupby('card_id').agg(['mean', 'sum', np.ptp, 'max']) final_group.columns = ['_'.join(col).strip() for col in final_group.columns.values] final_group.reset_index(inplace=True) return final_group la.groupby('card_id').agg(['mean', 'max', 'min', 'sum']) temp1.T aggregate_per_month(temp1).T aggregate_per_month(temp).T def successive_aggregates(df, field1, field2): t = df.groupby(['card_id', field1])[field2].mean() u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'min', 'max', 'std']) u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values] u.reset_index(inplace=True) return u successive_aggregates(temp, 'authorized_flag', 'purchase_amount').T successive_aggregates(temp, 'category_1', 'purchase_amount').T def get_cat_agg(df): agg_df = agg_on_cat(df, 'category_1', 'purchase_amount') agg_df = pd.merge(agg_df, agg_on_cat(df, 'category_2', 'purchase_amount'), on='card_id', how='left') agg_df = pd.merge(agg_df, agg_on_cat(df, 'category_3', 'purchase_amount'), on='card_id', how='left') agg_df = pd.merge(agg_df, agg_on_cat(df, 'authorized_flag', 'purchase_amount'), on='card_id', how='left') return agg_df def agg_on_cat(df, category, feature): temp_df = df.pivot_table(index='card_id', columns=category, aggfunc={feature: ['sum', 'mean']}) cols = [category + '_{0[2]}_{0[0]}_{0[1]}'.format(col) for col in 
temp_df.columns.tolist()] temp_df.columns = cols return temp_df get_cat_agg(temp) def successive_aggregates(df, field1, field2): t = df.groupby(['card_id', field1])[field2].mean() u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'max', np.ptp, 'sum']) u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values] u.reset_index(inplace=True) return u successive_aggregates(temp1, 'state_id', 'purchase_amount').T successive_aggregates(temp1, 'subsector_id', 'purchase_amount').T aggregate_per_month(temp1).T temp1.T def percentile(n): def percentile_(x): return x.quantile(0.5) percentile_.__name__ = 'percentile_{:2.0f}'.format(n*100) return percentile_ def aggregate_new_trans(df): aggs = {} # aggs['purchase_amount'] = ['sum','max','min','mean','median', percentile(80), percentile(20), percentile(75), percentile(25)] aggs['purchase_amount'] = [('sum', 'sum'), ('pct_75', lambda x: np.percentile(x, q = 75)), ('pct_25', lambda x: np.percentile(x, q = 25)), ('mean', 'mean'), ('median', 'median'), ('max', 'max'), ('min', 'min'), ('var', 'var'), ('skew', 'skew')] new_df = df.groupby(['card_id']).agg(aggs) new_df.columns = ['_'.join(col).strip() for col in new_df.columns.values] new_df.reset_index(inplace=True) other_df = (df.groupby('card_id') .size() .reset_index(name='transactions_count')) new_df = pd.merge(other_df, new_df, on='card_id', how='left') return new_df aggregate_new_trans(temp1) temp1['purchase_amount'].describe( percentiles = [ 0.25, 0.75 ] ) temp1.T temp1.groupby(['card_id'])['purchase_amount'].quantile(.2) temp<jupyter_output><empty_output>
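The scrap-pad cell above leaves its `monthly_rolling` helper unfinished (it indexes with the literal string `'fe1'` and returns the bare `pd.merge` function), and its `percentile(n)` helper always takes the 0.5 quantile regardless of `n`. A minimal sketch of what those helpers appear to be aiming for, assuming the `card_id` / `purchase_date` / `purchase_amount` columns used elsewhere in the notebook (illustrative only, not the notebook's own code):

```
# Sketch of the intended helpers, assuming 'purchase_date' is a datetime column.
import numpy as np
import pandas as pd

def monthly_rolling(df, feature):
    """30-day rolling sum of `feature` per card_id, merged back onto df."""
    tmp = df.sort_values('purchase_date')
    rolled = (tmp.groupby('card_id')
                 .rolling('30D', on='purchase_date')[feature]
                 .sum()
                 .reset_index()
                 .rename(columns={feature: feature + '_rolling_30d'}))
    # Note: duplicate (card_id, purchase_date) pairs would fan out on merge.
    return pd.merge(df, rolled, on=['card_id', 'purchase_date'], how='left')

def percentile(n):
    """Named percentile aggregator usable inside .agg(); n is a fraction in [0, 1]."""
    def percentile_(x):
        return x.quantile(n)          # use n, not a hard-coded 0.5
    percentile_.__name__ = 'percentile_{:2.0f}'.format(n * 100)
    return percentile_
```

Usage would follow the pattern already in the cell, e.g. `monthly_rolling(temp1, 'purchase_amount')` or `percentile(0.75)` inside an `agg` dictionary.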
no_license
/scrap pad 2.ipynb
mukeshpilaniya/kaggle
1
<jupyter_start><jupyter_text>#### STAMP scan of CATH domains and structural alignments Using domains defined in CATH and STAMP at different similarity levels/thresholds for generating structural alignments. Generating STAMP structural alignments for CATH superfamilies (structural groups and funfams) as they are provided by CATH.<jupyter_code>%run stamptools.py -h # before running STAMP for all CATH superfamilies # generated a superfamilies_analysis.txt summary with number of domains and SCGs/FFs %run cathtools.py -h # generates superfamilies_analysis.txt # updated to generate also a superfamilies_ssg_analysis.txt !qrsh -cwd -V pypy cathtools.py -a -l cath.log # sort by number of domains (column 3) !sort -n -k 3 ~/NOBACK/DB/superfamilies_analysis.txt > ~/NOBACK/DB/superfamilies_analysis_sorted.txt # sort by number of domains (column 4) !sort -n -k 4 ~/NOBACK/DB/superfamilies_analysis_ssg.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_sorted.txt # sort by number of domains (column 4) !sort -n -k 4 ~/NOBACK/DB/superfamilies_analysis_ff.txt > ~/NOBACK/DB/superfamilies_analysis_ff_sorted.txt # running STAMP align for a particular superfamily (number 12) # Note that this method generates all the STAMP files necessary for initial scan # then runs STAMP and generates alignments and pdbs (superimpositions) for all # levels in the tree (treewise) %run stamptools.py -a 'ssg' -i '2.60.40.1520_1' -l align.log # same but on the cluster !qrsh -cwd -V python2.7 stamptools.py -a 'ssg' -i '2.60.40.1520_1' -l align.log # generating a list of superfamily_fam ids # if length > 1: !qrsh -cwd -V python2.7 stamptools.py -a 'ssg' -l stamp.log > ~/NOBACK/DB/superfamilies_ssg.txt !qrsh -cwd -V python2.7 stamptools.py -a 'ff' -l stamp.log > ~/NOBACK/DB/superfamilies_ff.txt # print("{}_{}".format(nspf, nfam)) # continue # running STAMP align for all SSGs in CATH !qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp "-- -a 'ssg'" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log # running STAMP align for all FFs in CATH !qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp "-- -a 'ff'" -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log # rerun some SCGs/FFs with some problem (as defined in stamp/stamp_summary.txt) !qsub -cwd -V -q 64bit.q jobhandler.py -n 50 -g -r 8 -stamp "-- -a 'ssg'" -f ~/NOBACK/DB/stamp/rerun.txt -l align.log # print out some quality metrics for each STAMP SCG/FF alignment !qrsh -cwd -V python2.7 stamptools.py -q 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log !qrsh -cwd -V python2.7 stamptools.py -q 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log # get summary information + variants !qrsh -cwd -V python2.7 stamptools.py -c 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log !qrsh -cwd -V python2.7 stamptools.py -c 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log # splitting the jobs with jobhandler !qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp "-- -q 'ssg'" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align1.log !qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp "-- -q 'ff'" -f ~/NOBACK/DB/superfamilies_ff.txt -l align2.log # splitting the jobs with jobhandler !qsub -cwd -V -q 64bit.q jobhandler.py -n 200 -stamp "-- -c 'ssg'" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log !qsub -cwd -V -q 64bit.q jobhandler.py -n 200 -stamp "-- -c 'ff'" -f ~/NOBACK/DB/superfamilies_ff.txt -l align4.log # joining the files together !cat ~/NOBACK/DB/stamp_tmp/stats_ssg_*.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_stats2.txt !cat ~/NOBACK/DB/stamp_tmp/stats_ff_*.txt > 
~/NOBACK/DB/superfamilies_analysis_ff_stats2.txt # joining the files together !cat ~/NOBACK/DB/stamp_tmp/vars_ssg_*.txt > ~/NOBACK/DB/superfamilies_analysis_ssg_vars.txt !cat ~/NOBACK/DB/stamp_tmp/vars_ff_*.txt > ~/NOBACK/DB/superfamilies_analysis_ff_vars.txt # extend alignments !qsub -cwd -V stamptools.py -g 'ssg' -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log !qsub -cwd -V stamptools.py -g 'ff' -f ~/NOBACK/DB/superfamilies_ff.txt -l align4.log !qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp "-- -g 'ssg'" -f ~/NOBACK/DB/superfamilies_ssg.txt -l align3.log !qsub -cwd -V -q 64bit.q jobhandler.py -n 100 -stamp "-- -g 'ff'" -f ~/NOBACK/DB/superfamilies_ff.txt -l align4.log<jupyter_output><empty_output>
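The `sort -n -k 3` / `-k 4` shell steps above order the summary tables by domain count. If the same reordering is ever needed inside the notebook rather than the shell, a pandas equivalent might look like the sketch below; it assumes the summary files are whitespace-delimited with no header row and the domain count in the third column, which is what the shell commands imply but is not shown explicitly here.

```
# Hypothetical pandas equivalent of `sort -n -k 3 superfamilies_analysis.txt`.
import pandas as pd

summary = pd.read_csv('~/NOBACK/DB/superfamilies_analysis.txt',
                      sep=r'\s+', header=None)
summary_sorted = summary.sort_values(by=2)   # column 3 (0-based index 2), numeric sort
summary_sorted.to_csv('~/NOBACK/DB/superfamilies_analysis_sorted.txt',
                      sep='\t', header=False, index=False)
```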
non_permissive
/notebooks/6_STAMP_scans_and_alignments.ipynb
biomadeira/ProIntVar
1
<jupyter_start><jupyter_text>## M2 ModelWe can train Kingma's original M2 model in an unsupervised fashion.<jupyter_code>def px_graph(z, y): reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0 # -- p(x) with tf.variable_scope('px'): zy = tf.concat((z, y), 1, name='zy/concat') # h1 = Dense(zy, 512, 'layer1', tf.nn.relu, reuse=reuse) # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse) # px_logit = Dense(h2, 784, 'logit', reuse=reuse) h3 = Dense(zy, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse ) h3 = tf.reshape(h3,[-1, 14, 14, 28]) h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer4") h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer5") h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = "layer6") # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer7") px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = "layer7", reuse = reuse) px_logit = tf.contrib.layers.flatten(px_logit) return px_logit tf.reset_default_graph() # print(Placeholder) x = Placeholder((None, 784), name = 'x') # binarize data and create a y "placeholder" with tf.name_scope('x_binarized'): xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32) with tf.name_scope('y_'): y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0) # propose distribution over y qy_logit, qy = qy_graph(xb) # for each proposed y, infer z and reconstruct x z, zm, zv, px_logit = [[None] * 10 for i in xrange(4)] for i in xrange(10): with tf.name_scope('graphs/hot_at{:d}'.format(i)): y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i))) z[i], zm[i], zv[i] = qz_graph(xb, y) px_logit[i] = px_graph(z[i], y) # Aggressive name scoping for pretty graph visualization :P with tf.name_scope('loss'): with tf.name_scope('neg_entropy'): nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy) losses = [None] * 10 for i in xrange(10): with tf.name_scope('loss_at{:d}'.format(i)): losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], Constant(0), Constant(1)) with tf.name_scope('final_loss'): loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in xrange(10)]) show_graph(tf.get_default_graph().as_graph_def()) train_step = tf.train.AdamOptimizer().minimize(loss) sess = tf.Session() sess.run(tf.initialize_all_variables()) # sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version sess_info = (sess, qy_logit, nent, loss, train_step) train(None, mnist, sess_info, epochs=2)<jupyter_output> tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 3.07e-01, 1.37e+02, 3.10e-01, 1.36e+02, 3.93e-01, 1 tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 2.88e-01, 1.23e+02, 2.85e-01, 1.22e+02, 4.22e-01, 2 <jupyter_text># Modified M2 (Gaussian mixture hidden layer)With some thought, we can modified M2 to implicitly be a latent variable model with a Gaussian mixture stochastic layer. 
Training is a bit finnicky, so you might have to run it a few times before it works properly.<jupyter_code>method = 'relu' def custom_layer(zy, reuse): # Here are 3 choices for what to do with zy # I leave this as hyperparameter if method == 'identity': return zy elif method == 'relu': return tf.nn.relu(zy) elif method == 'layer': return Dense(zy, 512, 'layer1', tf.nn.relu, reuse=reuse) else: raise Exception('Undefined method') def px_graph(z, y): reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0 # -- transform z to be a sample from one of the Gaussian mixture components with tf.variable_scope('z_transform'): zm = Dense(y, 64, 'zm', reuse=reuse) zv = Dense(y, 64, 'zv', tf.nn.softplus, reuse=reuse) # h1 = Dense(y,128, 'h1', tf.nn.relu, reuse=reuse) # h2 = Dense(h1,128, 'h2', tf.nn.relu, reuse=reuse) # zm = Dense(h2, 64, 'zm', reuse=reuse) # zv = Dense(h2, 64, 'zv', tf.nn.softplus, reuse=reuse) # -- p(x) with tf.variable_scope('px'): with tf.name_scope('layer1'): zy = zm + tf.sqrt(zv) * z h1 = custom_layer(zy, reuse) h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse) # h3 = tf.nn.dropout(h2, 0.5, name = 'layer3') # h4 = Dense(h2, 512, 'layer4', tf.nn.relu, reuse = reuse) # h5 = tf.nn.dropout(h4, 0.5, name = 'layer5') # # px_logit = Dense(h2, 784, 'logit', reuse=reuse) # px_logit = Dense(h5, 784, 'logit', reuse=reuse) # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse = reuse) h3 = Dense(h2, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse ) h3 = tf.reshape(h3,[-1, 14, 14, 28]) h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer4") h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer5") h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = "layer6") # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer7") px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = "layer7", reuse = reuse) px_logit = tf.contrib.layers.flatten(px_logit) return px_logit tf.reset_default_graph() x = Placeholder((None, 784), name ='x') # binarize data and create a y "placeholder" with tf.name_scope('x_binarized'): xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32) with tf.name_scope('y_'): y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0) # propose distribution over y qy_logit, qy = qy_graph(xb) # for each proposed y, infer z and reconstruct x z, zm, zv, px_logit = [[None] * 10 for i in xrange(4)] for i in xrange(10): with tf.name_scope('graphs/hot_at{:d}'.format(i)): y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i))) z[i], zm[i], zv[i] = qz_graph(xb, y) px_logit[i] = px_graph(z[i], y) # Aggressive name scoping for pretty graph visualization :P with tf.name_scope('loss'): with tf.name_scope('neg_entropy'): nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy) losses = [None] * 10 for i in xrange(10): with tf.name_scope('loss_at{:d}'.format(i)): losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], Constant(0), Constant(1)) with tf.name_scope('final_loss'): loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in xrange(10)]) show_graph(tf.get_default_graph().as_graph_def()) train_step = tf.train.AdamOptimizer().minimize(loss) sess = tf.Session() sess.run(tf.initialize_all_variables()) # sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version sess_info = (sess, qy_logit, nent, loss, train_step) train(None, 
mnist, sess_info, epochs=2)<jupyter_output> tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 2.57e-02, 1.40e+02, 2.68e-02, 1.39e+02, 1.91e-01, 1 tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 2.48e-02, 1.22e+02, 2.39e-02, 1.21e+02, 1.96e-01, 2 <jupyter_text># Explicit Gaussian Mixture VAEWhy be implicit when we can explicitly train a Gaussian Mixture VAE? So here's code for doing that. Unlike the modified M2, GMVAE is very stable. <jupyter_code>def px_graph(z, y): reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='px')) > 0 # -- p(z) with tf.variable_scope('pz'): h1 = Dense(y, 128, 'h1', tf.nn.relu, reuse=reuse) h2 = Dense(h1, 128, 'h2', tf.nn.relu, reuse=reuse) zm = Dense(h2, 64, 'zm', reuse=reuse) zv = Dense(h2, 64, 'zv', tf.nn.softplus, reuse=reuse) # zv = Dense(y, 64, 'zv', tf.nn.softplus, reuse=reuse) # zm = Dense(y, 64, 'zm', reuse=reuse) # -- p(x) with tf.variable_scope('px'): h1 = Dense(z, 512, 'layer1', tf.nn.relu, reuse=reuse) # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse=reuse) # h3 = Dense(h2, 512, 'layer3', tf.nn.relu, reuse=reuse) # px_logit = Dense(h3, 784, 'logit', reuse=reuse) # h2 = Dense(h1, 512, 'layer2', tf.nn.relu, reuse = reuse) h3 = Dense(h1, 28 * 14 * 14, 'layer3', tf.nn.relu, reuse = reuse ) h3 = tf.reshape(h3,[-1, 14, 14, 28]) h4 = Conv2d_transpose(h3, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer4") h5 = Conv2d_transpose(h4, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer5") h6 = Conv2d_transpose(h5, 28, [3, 3], [2, 2], activation=tf.nn.relu, reuse = reuse, scope = "layer6") # h7 = Conv2d_transpose(h6, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer7") # h8 = Conv2d_transpose(h7, 28, [3, 3], [1, 1], activation=tf.nn.relu, reuse = reuse, scope = "layer8") px_logit = Conv2d(h6, 1, [2, 2], [1, 1] ,scope = "layer7", reuse = reuse) px_logit = tf.contrib.layers.flatten(px_logit) return zm, zv, px_logit tf.reset_default_graph() x = Placeholder((None, 784), name = 'x') # binarize data and create a y "placeholder" with tf.name_scope('x_binarized'): xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32) with tf.name_scope('y_'): y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0) # propose distribution over y qy_logit, qy = qy_graph(xb) # for each proposed y, infer z and reconstruct x z, zm, zv, zm_prior, zv_prior, px_logit = [[None] * 10 for i in xrange(6)] for i in xrange(10): with tf.name_scope('graphs/hot_at{:d}'.format(i)): y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i))) z[i], zm[i], zv[i] = qz_graph(xb, y) zm_prior[i], zv_prior[i], px_logit[i] = px_graph(z[i], y) # Aggressive name scoping for pretty graph visualization :P with tf.name_scope('loss'): with tf.name_scope('neg_entropy'): nent = -cross_entropy_with_logits(logits = qy_logit, labels = qy) losses = [None] * 10 for i in xrange(10): with tf.name_scope('loss_at{:d}'.format(i)): losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i]) with tf.name_scope('final_loss'): loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in xrange(10)]) show_graph(tf.get_default_graph().as_graph_def()) train_step = tf.train.AdamOptimizer().minimize(loss) sess = tf.Session() sess.run(tf.initialize_all_variables()) # sess.run(tf.global_variables_initializer()) # Change initialization protocol depending on tensorflow version sess_info = (sess, qy_logit, nent, loss, train_step) train(None, mnist, sess_info, epochs=2)<jupyter_output> tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 
4.93e-02, 1.41e+02, 5.26e-02, 1.40e+02, 1.88e-01, 1 tr_ent, tr_loss, t_ent, t_loss, t_acc, epoch 4.77e-02, 1.22e+02, 4.50e-02, 1.21e+02, 2.03e-01, 2 <jupyter_text># Evaluation<jupyter_code>import glob import pandas as pd import seaborn as sns import os.path %pylab inline def prune_rows(arr, k): delete_rows = [] for i in xrange(len(arr)): if np.isnan(arr[i, k]): delete_rows += [i] return np.delete(arr, delete_rows, axis=0)[:, :k] def plot_from_csv(glob_str, axes, color_idx): dfs = [pd.read_csv(f) for f in glob.glob('logs/{:s}.log*'.format(glob_str))] df = (pd.concat(dfs, axis=1, keys=range(len(dfs))) .swaplevel(0, 1, axis=1) .sortlevel(axis=1)) df = df[:200].apply(pd.to_numeric) k = 199 ax1, ax2, ax3 = axes sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_ent')].values.T, k), ax=ax1, condition=glob_str, color=sns.color_palette()[color_idx]) ax1.set_ylim(0,3) ax1.set_xlabel('Epochs') ax1.set_ylabel('Conditional Entropy') sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_loss')].values.T, k), ax=ax2, condition=glob_str, color=sns.color_palette()[color_idx]) ax2.set_xlabel('Epochs') ax2.set_ylabel('Loss') sns.tsplot(data=prune_rows(df['{:>10s}'.format('t_acc')].values.T, k), ax=ax3, condition=glob_str, color=sns.color_palette()[color_idx]) ax3.set_xlabel('Epochs') ax3.set_ylabel('Accuracy') f, axes = plt.subplots(1,3, figsize=(20, 5)) plot_from_csv('m2', axes, 0) plt.savefig('images/m2.png') f, axes = plt.subplots(1,3, figsize=(20, 5)) plot_from_csv('modified_m2_method=relu', axes, 1) plt.savefig('images/modified_m2_method=relu.png') f, axes = plt.subplots(1,3, figsize=(20, 5)) plot_from_csv('gmvae', axes, 2) plt.savefig('images/gmvae.png') f, axes = plt.subplots(1,3, figsize=(20, 5)) plot_from_csv('m2', axes, 0) plot_from_csv('modified_m2_method=relu', axes, 1) plot_from_csv('gmvae', axes, 2) plt.savefig('images/combined.png')<jupyter_output>/Users/huxiaojing/tensorflow/lib/python2.7/site-packages/ipykernel_launcher.py:12: FutureWarning: sortlevel is deprecated, use sort_index(level= ...) if sys.path[0] == '':
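The "Gaussian mixture stochastic layer" in the modified M2 above comes down to a single reparameterization step: a standard-normal sample `z` is shifted and scaled by a mean and variance that depend only on the one-hot class `y`, so each `y` selects one mixture component. A stripped-down NumPy sketch of just that step (illustrative only, not the notebook's TensorFlow graph):

```
# Illustrative sketch of the mixture reparameterization used in px_graph:
# z ~ N(0, I) is mapped into the component selected by the one-hot vector y.
import numpy as np

rng = np.random.default_rng(0)
K, D = 10, 64                                      # number of components, latent dim
component_means = rng.normal(size=(K, D))          # stands in for Dense(y, 64, 'zm')
component_vars = np.exp(rng.normal(size=(K, D)))   # stands in for Dense(y, 64, 'zv')

def sample_component(y_onehot, z):
    zm = y_onehot @ component_means      # mean of the selected component
    zv = y_onehot @ component_vars       # its (diagonal) variance
    return zm + np.sqrt(zv) * z          # same form as zy = zm + tf.sqrt(zv) * z

y = np.eye(K)[3]                         # "hot at 3", as in the notebook's loop
z = rng.normal(size=D)
zy = sample_component(y, z)
```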
permissive
/experiments.ipynb
SharynHu/vae-clustering-cnn
4
<jupyter_start><jupyter_text>- Look through sklearn datasets - Find a datasetnot used in example - Train a random forest model on your dataset - Determine which forest was the most accurate using the .score() function - Visualize feature importance with a bar plot<jupyter_code>from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestRegressor import numpy as np import pandas as pd import matplotlib.pyplot as plt model = RandomForestRegressor(n_estimators=10,random_state=0, n_jobs= 10) boston = load_boston() df = pd.DataFrame(boston.data, columns=boston.feature_names) # load the dataset as a pandas data frame # Train model.fit(df, boston.target) print ("score: %f" % model.score(df, boston.target)) bestScore={0:0} for forest, tree in enumerate(model.estimators_): score = tree.score(df, boston.target) if score > list(bestScore.values())[0]: bestScore={forest:score} print(bestScore) # Extract single tree treeSTD = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0) importances = model.feature_importances_ indices = np.argsort(importances)[::-1] totalFeatures = len(boston.data[0]) # Print the feature ranking print("\nFeature ranking:") for f in range(totalFeatures): print("%d. feature %s (%f)" % (f + 1, boston.feature_names[indices[f]], importances[indices[f]])) # Plot the feature importances of the forest fig = plt.figure(figsize=[12,6]) plt.title("Feature importances") plt.bar(range(totalFeatures), importances[indices], color="r", yerr=treeSTD[indices], align="center") plt.xticks(range(totalFeatures), boston.feature_names[indices]) plt.show() <jupyter_output>score: 0.973934 {8: 0.9369247737000006} Feature ranking: 1. feature RM (0.524906) 2. feature LSTAT (0.273742) 3. feature DIS (0.063590) 4. feature CRIM (0.034805) 5. feature NOX (0.028503) 6. feature TAX (0.017679) 7. feature B (0.016519) 8. feature PTRATIO (0.015914) 9. feature AGE (0.010513) 10. feature RAD (0.006009) 11. feature INDUS (0.004543) 12. feature ZN (0.003065) 13. feature CHAS (0.000211)
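Note that the `.score()` calls in the cell above evaluate on the same data the forest was fit on, so the R² of 0.97 is an in-sample figure. A held-out split gives a less optimistic estimate; a minimal sketch follows (it assumes an scikit-learn version that still ships `load_boston`, which was removed in 1.2+, where `fetch_california_housing` is a drop-in substitute):

```
# Sketch: score the forest on a held-out split instead of the training data.
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(
    boston.data, boston.target, test_size=0.25, random_state=0)

model = RandomForestRegressor(n_estimators=10, random_state=0, n_jobs=10)
model.fit(X_train, y_train)
print("train R^2: %.3f" % model.score(X_train, y_train))  # in-sample, optimistic
print("test  R^2: %.3f" % model.score(X_test, y_test))    # held-out, more honest
```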
no_license
/Eduonix Edegree/4.Complete Guide to Machine Learning using Python/3. Random Forest/3.0 - Chapter Problem/3.0 - My Answer.ipynb
pvdwijdeven/ML_eduonix
1
<jupyter_start><jupyter_text># Computing saliency masks with the PAIRML saliency library in TF1 This notebook demonstrates a number of saliency mask techniques, augmented with the `SmoothGrad` technique, using the Inception V3 convolutional neural network. The intention of this notebook is to have as few dependencies as possible to show how to compute masks. This notebook shows the following techniques, alongside with the `SmoothGrad` augmentation: * Vanilla Gradients ([paper](https://scholar.google.com/scholar?q=Visualizing+higher-layer+features+of+a+deep+network&btnG=&hl=en&as_sdt=0%2C22), [paper](https://arxiv.org/abs/1312.6034)) * Guided Backpropogation ([paper](https://arxiv.org/abs/1412.6806)) * Integrated Gradients ([paper](https://arxiv.org/abs/1703.01365)) * XRAI ([paper](https://arxiv.org/abs/1906.02825)) * Grad-CAM ([paper](https://arxiv.org/abs/1610.02391)) * Blur IG ([paper](https://arxiv.org/abs/2004.03383)) * Guided IG ([paper](https://arxiv.org/abs/2106.09788)) This notebook assumes you have the `saliency` pip package installed. To install run (use `pip3` for python 3.x): ``` pip install saliency[tf1] tensorflow_hub ```<jupyter_code># Boilerplate imports. import tensorflow.compat.v1 as tf import numpy as np import PIL.Image from matplotlib import pylab as P # From our repository. import saliency.tf1 as saliency %matplotlib inline<jupyter_output>INFO:tensorflow:Enabling eager execution INFO:tensorflow:Enabling v2 tensorshape INFO:tensorflow:Enabling resource variables INFO:tensorflow:Enabling tensor equality INFO:tensorflow:Enabling control flow v2 <jupyter_text>### Utility methods<jupyter_code># Boilerplate methods. def ShowImage(im, title='', ax=None): if ax is None: P.figure() P.axis('off') im = (im * 255).astype(np.uint8) P.imshow(im) P.title(title) def ShowGrayscaleImage(im, title='', ax=None): if ax is None: P.figure() P.axis('off') P.imshow(im, cmap=P.cm.gray, vmin=0, vmax=1) P.title(title) def ShowHeatMap(im, title, ax=None): if ax is None: P.figure() P.axis('off') P.imshow(im, cmap='inferno') P.title(title) def LoadImage(file_path): im = PIL.Image.open(file_path) im = np.asarray(im) return im / 255<jupyter_output><empty_output><jupyter_text>### Loading the Inception model graph Run the following cell to download the network. This assumes you have the `tensorflow_hub` pip package installed. To install run (use `pip3` for python 3.x): ``` pip install tensorflow-hub ``` Alternatively, the pretrained network can be downloaded [here](https://tfhub.dev/google/imagenet/inception_v3/classification/1).<jupyter_code>import tensorflow_hub as hub model_path = "https://tfhub.dev/google/imagenet/inception_v3/classification/3" graph = tf.Graph() sess = tf.Session(graph=graph) with graph.as_default(): hub.Module(model_path) sess.run(tf.global_variables_initializer()) sess.run(tf.tables_initializer())<jupyter_output><empty_output><jupyter_text>### Adding a single logit tensor for which we want to compute the mask<jupyter_code>with graph.as_default(): images = graph.get_tensor_by_name('module/hub_input/images:0') logits = graph.get_tensor_by_name('module/InceptionV3/Logits/SpatialSqueeze:0') # Construct the scalar neuron tensor. neuron_selector = tf.placeholder(tf.int32) y = logits[:,neuron_selector] # Construct tensor for predictions. prediction = tf.argmax(logits, 1)<jupyter_output><empty_output><jupyter_text>### Load an image and infer<jupyter_code># Load the image im = LoadImage('./doberman.png') # Show the image ShowImage(im) # Make a prediction. 
prediction_class = sess.run(prediction, feed_dict = {images: [im]})[0] print("Prediction class: " + str(prediction_class)) # Should be a doberman, class idx = 237<jupyter_output>Prediction class: 237 <jupyter_text>### Vanilla Gradient & SmoothGrad<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. gradient_saliency = saliency.GradientSaliency(graph, sess, y, images) # Compute the vanilla mask and the smoothed mask. vanilla_mask_3d = gradient_saliency.GetMask(im, feed_dict = {neuron_selector: prediction_class}) smoothgrad_mask_3d = gradient_saliency.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class}) # Call the visualization methods to convert the 3D tensors to 2D grayscale. vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_mask_3d) smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Gradient', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(smoothgrad_mask_grayscale, title='SmoothGrad', ax=P.subplot(ROWS, COLS, 2))<jupyter_output><empty_output><jupyter_text>### Guided Backprop & SmoothGrad<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. # NOTE: GuidedBackprop creates a copy of the given graph to override the gradient. # Don't construct too many of these! guided_backprop = saliency.GuidedBackprop(graph, sess, y, images) # Compute the vanilla mask and the smoothed mask. vanilla_guided_backprop_mask_3d = guided_backprop.GetMask( im, feed_dict = {neuron_selector: prediction_class}) smoothgrad_guided_backprop_mask_3d = guided_backprop.GetSmoothedMask( im, feed_dict = {neuron_selector: prediction_class}) # Call the visualization methods to convert the 3D tensors to 2D grayscale. vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_guided_backprop_mask_3d) smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_guided_backprop_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Guided Backprop', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(smoothgrad_mask_grayscale, title='SmoothGrad Guided Backprop', ax=P.subplot(ROWS, COLS, 2))<jupyter_output>INFO:tensorflow:Restoring parameters from /tmp/guided_backprop_ckpt <jupyter_text>### Integrated Gradients & SmoothGrad<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. integrated_gradients = saliency.IntegratedGradients(graph, sess, y, images) # Baseline is a black image. baseline = np.zeros(im.shape) # Compute the vanilla mask and the smoothed mask. vanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask( im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20) # Smoothed mask for integrated gradients will take a while since we are doing nsamples * nsamples computations. 
smoothgrad_integrated_gradients_mask_3d = integrated_gradients.GetSmoothedMask( im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20) # Call the visualization methods to convert the 3D tensors to 2D grayscale. vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d) smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_integrated_gradients_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(smoothgrad_mask_grayscale, title='Smoothgrad Integrated Gradients', ax=P.subplot(ROWS, COLS, 2))<jupyter_output><empty_output><jupyter_text>### XRAI Full and Fast<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. xrai_object = saliency.XRAI(graph, sess, y, images) # Compute XRAI attributions with default parameters xrai_attributions = xrai_object.GetMask(im, feed_dict={neuron_selector: prediction_class}, batch_size=20) # Set up matplot lib figures. ROWS = 1 COLS = 3 UPSCALE_FACTOR = 20 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Show original image ShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1)) # Show XRAI heatmap attributions ShowHeatMap(xrai_attributions, title='XRAI Heatmap', ax=P.subplot(ROWS, COLS, 2)) # Show most salient 30% of the image mask = xrai_attributions >= np.percentile(xrai_attributions, 70) im_mask = np.array(im) im_mask[~mask] = 0 ShowImage(im_mask, title='Top 30%', ax=P.subplot(ROWS, COLS, 3)) # Create XRAIParameters and set the algorithm to fast mode which will produce an approximate result. xrai_params = saliency.XRAIParameters() xrai_params.algorithm = 'fast' # Compute XRAI attributions with fast algorithm xrai_attributions_fast = xrai_object.GetMask(im, feed_dict={neuron_selector: prediction_class}, extra_parameters=xrai_params, batch_size=20) # Set up matplot lib figures. ROWS = 1 COLS = 3 UPSCALE_FACTOR = 20 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Show original image ShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1)) # Show XRAI heatmap attributions ShowHeatMap(xrai_attributions_fast, title='XRAI Heatmap', ax=P.subplot(ROWS, COLS, 2)) # Show most salient 30% of the image mask = xrai_attributions_fast >= np.percentile(xrai_attributions_fast, 70) im_mask = np.array(im) im_mask[~mask] = 0 ShowImage(im_mask, 'Top 30%', ax=P.subplot(ROWS, COLS, 3))<jupyter_output><empty_output><jupyter_text>### Grad-CAM<jupyter_code># Compare Grad-CAM and Smoothgrad with Grad-CAM. Note: This will take a long time to run. # GradCAM uses the final convolution layer, in this case "Mixed_7c" with graph.as_default(): conv_tensor = graph.get_tensor_by_name('module/InceptionV3/InceptionV3/Mixed_7c/concat:0') # Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. grad_cam = saliency.GradCam(graph, sess, y, images, conv_tensor) # Compute the Grad-CAM mask and the smoothed mask. grad_cam_mask_3d = grad_cam.GetMask(im, feed_dict = {neuron_selector: prediction_class}) smoothgrad_grad_cam_mask_3d = grad_cam.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class}) # Call the visualization methods to convert the 3D tensors to 2D grayscale. 
grad_cam_mask_grayscale = saliency.VisualizeImageGrayscale(grad_cam_mask_3d) smoothgrad_grad_cam_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_grad_cam_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(grad_cam_mask_grayscale, title='Grad-CAM', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(smoothgrad_grad_cam_mask_grayscale, title='SmoothGrad Grad-CAM', ax=P.subplot(ROWS, COLS, 2))<jupyter_output><empty_output><jupyter_text>### Guided IG<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. integrated_gradients = saliency.IntegratedGradients(graph, sess, y, images) guided_ig = saliency.GuidedIG(graph, sess, y, images) # Baseline is a black image for vanilla integrated gradients. baseline = np.zeros(im.shape) # Compute the vanilla mask and the Guided IG mask. vanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask( im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20) guided_ig_mask_3d = guided_ig.GetMask( im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, max_dist=0.2, fraction=0.5) # Call the visualization methods to convert the 3D tensors to 2D grayscale. vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d) guided_ig_mask_grayscale = saliency.VisualizeImageGrayscale(guided_ig_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 3 UPSCALE_FACTOR = 20 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowImage(im, title='Original Image', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 2)) ShowGrayscaleImage(guided_ig_mask_grayscale, title='Guided Integrated Gradients', ax=P.subplot(ROWS, COLS, 3))<jupyter_output><empty_output><jupyter_text>### Blur IG<jupyter_code># Construct the saliency object. This doesn't yet compute the saliency mask, it just sets up the necessary ops. integrated_gradients = saliency.IntegratedGradients(graph, sess, y, images) blur_ig = saliency.BlurIG(graph, sess, y, images) # Baseline is a black image for vanilla integrated gradients. baseline = np.zeros(im.shape) # Compute the vanilla mask and the Blur IG mask. vanilla_integrated_gradients_mask_3d = integrated_gradients.GetMask( im, feed_dict = {neuron_selector: prediction_class}, x_steps=25, x_baseline=baseline, batch_size=20) blur_ig_mask_3d = blur_ig.GetMask( im, feed_dict = {neuron_selector: prediction_class}, batch_size=20) # Call the visualization methods to convert the 3D tensors to 2D grayscale. vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_integrated_gradients_mask_3d) blur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(blur_ig_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(vanilla_mask_grayscale, title='Vanilla Integrated Gradients', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(blur_ig_mask_grayscale, title='Blur Integrated Gradients', ax=P.subplot(ROWS, COLS, 2)) # Compare BlurIG and Smoothgrad with BlurIG. Note: This will take a long time to run. # Construct the saliency object. 
This doesn't yet compute the saliency mask, it just sets up the necessary ops. blur_ig = saliency.BlurIG(graph, sess, y, images) # Compute the Blur IG mask and Smoothgrad+BlurIG mask. blur_ig_mask_3d = blur_ig.GetMask(im, feed_dict = {neuron_selector: prediction_class}, batch_size=20) # Smoothed mask for BlurIG will take a while since we are doing nsamples * nsamples computations. smooth_blur_ig_mask_3d = blur_ig.GetSmoothedMask(im, feed_dict = {neuron_selector: prediction_class}, batch_size=20) # Call the visualization methods to convert the 3D tensors to 2D grayscale. blur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(blur_ig_mask_3d) smooth_blur_ig_mask_grayscale = saliency.VisualizeImageGrayscale(smooth_blur_ig_mask_3d) # Set up matplot lib figures. ROWS = 1 COLS = 2 UPSCALE_FACTOR = 10 P.figure(figsize=(ROWS * UPSCALE_FACTOR, COLS * UPSCALE_FACTOR)) # Render the saliency masks. ShowGrayscaleImage(blur_ig_mask_grayscale, title='Blur Integrated Gradients', ax=P.subplot(ROWS, COLS, 1)) ShowGrayscaleImage(smooth_blur_ig_mask_grayscale, title='Smoothgrad Blur IG', ax=P.subplot(ROWS, COLS, 2)) <jupyter_output><empty_output>
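Every `GetSmoothedMask` call above is the SmoothGrad augmentation: the attribution is recomputed for several noisy copies of the input and averaged, which suppresses high-frequency gradient noise. A generic sketch of that averaging loop, for intuition only; the `saliency` package implements this internally with its own noise level and sample-count defaults:

```
# Generic SmoothGrad sketch: average a saliency function over noisy inputs.
# `get_mask` stands for any single-pass attribution (e.g. a vanilla gradient).
import numpy as np

def smoothgrad(get_mask, image, nsamples=25, noise_frac=0.15):
    stdev = noise_frac * (image.max() - image.min())
    total = np.zeros_like(image, dtype=np.float64)
    for _ in range(nsamples):
        noisy = image + np.random.normal(0.0, stdev, image.shape)
        total += get_mask(noisy)     # accumulate the mask of each noisy copy
    return total / nsamples          # averaged, smoother saliency mask
```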
permissive
/Examples_tf1.ipynb
Pandinosaurus/saliency
12
<jupyter_start><jupyter_text># **Controllability** test - The system is controllable if the controllability matrix has full rank<jupyter_code>Uc = matlab.ctrb(A, B) Rc = np.linalg.matrix_rank(Uc) print("rank:",Rc,"size:",min(Uc.shape))<jupyter_output>rank: 2 size: 2 <jupyter_text># **Observability** test - The system is observable if the observability matrix has full rank<jupyter_code>Uo = matlab.obsv(A, C) Ro = np.linalg.matrix_rank(Uo) print("rank:",Ro,"size:",min(Uo.shape))<jupyter_output>rank: 2 size: 2
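For reference, the matrices that `matlab.ctrb` and `matlab.obsv` build for an n-state system $\dot{x} = Ax + Bu$, $y = Cx$ are the standard ones below; "full rank" means rank equal to n (here n = 2), which is exactly what the printed rank/size comparison checks.

$$
U_c = \begin{bmatrix} B & AB & A^{2}B & \cdots & A^{n-1}B \end{bmatrix},
\qquad
U_o = \begin{bmatrix} C \\ CA \\ CA^{2} \\ \vdots \\ CA^{n-1} \end{bmatrix}
$$

The system is controllable iff $\operatorname{rank} U_c = n$ and observable iff $\operatorname{rank} U_o = n$.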
no_license
/4章5章6章らへん.ipynb
gomi-kuzu/wakariyasui_modern_control
2
<jupyter_start><jupyter_text>## Lending Club loan analysis This notebook looks at data from the Lending Club database. The database and csv file containing all loan info and data dictionary were obtained from a Kaggle link (https://www.kaggle.com/wendykan/lending-club-loan-data). This database contains information relating to loans given from 2008 to 2015. * **Section 1:Exploratory analyses** is done to look at general features such as *average loan*, *interest rate* and *intallments* over time, *loan purposes* and *amounts* and *status* of loans. This is done in **SQL** and output files of the SQL query results are used here. * **Section 2: Only good and bad loans will be examined** (based on information for loan status from above. Also done in **SQL** and output files of the SQL query results are used here. Good and bad loans will be *stratified* by loan grade, US states the loans come from, loan amount, employment length, income, home ownership and installment. This is to examine if there are trends between type of loan and these features. For good/bad loans by states, the maps appear when viewed on NBViewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb) even if it may not appear on github. * **Section 3: Features selection:** look through all columns and determine which features will need to be kept for the loan prediction model. Those that contain information from the future/ not relevant will be removed. * **Section 4: Data cleaning:** Convert catgorical variables to integers. Remove rows with missing values. * **Section 5:** **Machine Learning models: 2 models** are used to make predictions: **1) K-Nearest Neighbors model** **2) Logistic Regression model.**<jupyter_code>import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import sklearn from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score import folium as folium import os import seaborn as sns<jupyter_output><empty_output><jupyter_text>### Section 1: Exploratory analysesLook at total records in the database.<jupyter_code>total= pd.read_csv("output/tot.csv") tot = total["total records"][0] print("The total number of records in the database is: {}.".format(tot))<jupyter_output>The total number of records in the database is: 887383. <jupyter_text>Look at the trends in average loan amount and average interest from 2008 to 2015. From 2008-09 and around 2012-2013, even though average interest rates were increasing, average loan amounts were also high/increasing. <jupyter_code># average loan. loan_month = pd.read_csv("output/avg-loan.csv") xmonth = loan_month["loan_year"] yavg_loan = loan_month["average_loan"] month_cat = np.arange(len(xmonth)) # average interest. int_month = pd.read_csv("output/avg-interest.csv") yavg_int = int_month["interest_rate"] month_cat = np.arange(len(xmonth)) # plot figure fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(111) # figure for avg loan. ax1.set_xlabel("Year",fontsize=15) ax1.set_ylabel("Average loan amount ($)",fontsize=15) ax1.set_title("Average annual loan amount (from 2008 - 2015)",fontsize=15) ax1.plot(xmonth, yavg_loan, alpha=0.85, label="Average loan") # figure for avg interest. 
ax2 = ax1.twinx() ax2.set_ylabel("Average interest rate (%)",fontsize=15) ax2.plot(xmonth, yavg_int, alpha=0.8, label="Avg interest rate (%)", color="red") ax1.legend(loc="upper left") ax2.legend(loc="upper right") fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>Look at trends in average loan installment from 2008 to 2015. This has increased from around $275/month is 2008 to around $425 in 2015.<jupyter_code># avg installment install = pd.read_csv("output/avg-installment.csv") avg_instal = pd.DataFrame({"loan_year": [2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015], "installment": [279.0, 330.0, 303.0, 338.0, 422.0, 452.0, 422.0, 422.0]}) avg_instal.plot(kind="line", x="loan_year", y="installment", color="b", alpha=0.3, figsize=(15,5), fontsize=15) plt.xlabel('Year', fontsize=15) plt.ylabel('Average instalment ($)', fontsize=15) plt.title('Average loan installment (from 2008 - 2015)', fontsize=15) plt.show()<jupyter_output><empty_output><jupyter_text>What are the loans used for? We can see that the bulk of it is used for debt consolidation, followed by credit card payments.<jupyter_code># loan purpose purpose = pd.read_csv("output/purpose.csv") x_purpose = purpose["purpose"] y_perct = purpose["Percentage"] purpose_cat = np.arange(len(x_purpose)) plt.bar(x_purpose, y_perct) plt.xticks(purpose_cat, x_purpose, rotation=90) plt.ylabel("% of loans") plt.title("Purpose of loan", fontsize=15) plt.show()<jupyter_output><empty_output><jupyter_text>Categorize loan amounts into 4 categorize to see what are the most common loan amounts give. Most loans re between $5000- $15000.<jupyter_code># loan categories lf = pd.read_csv("output/loan-cat.csv") lf = pd.DataFrame({"loan_amount": [1, 2, 3, 4], "total" : [75513, 460027, 248174, 103669]}) lf["loan_amount"] = lf["loan_amount"].replace({1: "under $5000", 2: "$5000 - $15000", 3: "$15000 - $25000", 4: "$25000 and above"}) lf.plot(kind="bar", x="loan_amount", y="total", color="g", alpha=0.7) plt.xlabel('Loan category', fontsize=15) plt.ylabel('No of loans') plt.title('Loan categories') plt.show()<jupyter_output><empty_output><jupyter_text>What is the distribution of loan status of all loans in the database? Most are current loans (> 600,00).<jupyter_code># loan distribution loan_type = pd.read_csv("output/loan-status-overall.csv") loan_type<jupyter_output><empty_output><jupyter_text>### Section 2: Exploratory analyses stratified by good/bad loans. For the rest of the analyses in this notebook, loans will be categorized as follows: - Good loans (this will be made up of fully paid loans). - Bad loans (will consist of charged off loans. - Remaining will not be taken into account as they are either current or there isn't enought information to determine status. - To get a better sense of what determines if a loan will be paid off or not, we will look at the good/bad loans only.<jupyter_code># good/bad loan and numbers loan_type = pd.read_csv("output/loan-status.csv") print("No of bad loans and good loans, respectively:") loan_type<jupyter_output>No of bad loans and good loans, respectively: <jupyter_text>Total number of good & bad loans, from the above and plotted as a pie chart in %. We can see that >80% of loans are good loans.<jupyter_code>loan_status_tot = loan_type["number"].sum() print("The total number of good & bad loans is: {}.".format(loan_status_tot)) # % good/bad loans. 
values = [45248, 207724] colors = ["r", "b"] labels = ["Charged Off", "Fully Paid"] plt.pie(values, colors=colors, labels=labels, autopct="%0.1f%%") plt.title("Percentage of good and bad loans") plt.show()<jupyter_output>The total number of good & bad loans is: 252972. <jupyter_text>Dollar value of bad and good loans<jupyter_code># total loan value for each type. loan_val = pd.read_csv("output/loans-total.csv") loan_val.plot(kind="bar", x="loan_status", y="dollar_total", color="b", alpha=0.7) plt.xlabel('Loan status', fontsize=15) plt.ylabel('Total loan amount ($)') plt.title('Total loan value') plt.show()<jupyter_output><empty_output><jupyter_text>Good and bad loans by loan grade. Most good loans range from grade A to C. Most bad loans range from grade B to D.<jupyter_code># loan type by loan grade. lg = pd.read_csv("output/loans-grade.csv") # loan_grade # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(8, 4)) # set height of bar xgrade = lg["grade"].drop_duplicates() y_bad = lg[lg["loan_status"]== "Charged Off"]["loans number"] y_good = lg[lg["loan_status"] == "Fully Paid"]["loans number"] # Set position of bar on X axis r1 = np.arange(len(xgrade)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # Make the plot plt.bar(r1, y_bad, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, y_good, color="g", alpha=0.7, width=barWidth, label="Good loans") # Add xticks on the middle of the group bars plt.xlabel("Loan grades", fontsize=15) plt.xticks([r + barWidth for r in range(len(xgrade))], ["A", "B", "C", "D", "E", "F", "G"]) # Create legend & Show graphic plt.legend() fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>From above, we see that the majority of good/bad loans fall into grades A to D. Therefore, we will look at grade A to D to see what the main purposes of taking loans are. Again, the main reason across all 4 grades is for debt consolidation, followed by credit card payment.<jupyter_code># loan purpose, by grade. # Setting up a figure to accomodate 4 grades given in the dataset. fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 10)) fig.suptitle("Fig 1- Purpose of loans, by loan grade", fontweight="bold", size=18) # Grade A loans, by purpose. gp_a = pd.read_csv("output/grade-a-purpose.csv") gp_a.plot(kind="bar", ax=axes[0,0], x="purpose", y="count") axes[0,0].set_title("Grade A loans", fontsize=15) axes[0,0].set_xlabel("Loan purpose") axes[0,0].set_ylabel("No of loans") # Grade B loans, by purpose. gp_b = pd.read_csv("output/grade-b-purpose.csv") gp_b.plot(kind="bar", ax=axes[0,1], x="purpose", y="count") axes[0,1].set_title("Grade B loans", fontsize=15) axes[0,1].set_xlabel("Loan purpose") axes[0,1].set_ylabel("count") # Grade C loans, by purpose. gp_c = pd.read_csv("output/grade-c-purpose.csv") gp_c.plot(kind="bar", ax=axes[1,0], x="purpose", y="count") axes[1,0].set_title("Grade C loans", fontsize=15) axes[1,0].set_xlabel("Loan purpose") axes[1,0].set_ylabel("count") # Grade D loans, by purpose. gp_d = pd.read_csv("output/grade-d-purpose.csv") gp_d.plot(kind="bar", ax=axes[1,1], x="purpose", y="count") axes[1,1].set_title("Grade D loans", fontsize=15) axes[1,1].set_xlabel("Loan purpose") axes[1,1].set_ylabel("count") fig.tight_layout(pad=7, w_pad=12, h_pad=3) fig.show()<jupyter_output>/usr/local/lib/python3.7/site-packages/matplotlib/figure.py:445: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure. 
% get_backend()) <jupyter_text>Next, we'll look at the distribution of good loans across US states. The map describes the number of loans. We can see from the figure below that the highest number of good loans come from California. This is followed by Texas, Florida and New York. These are also the 4 most populous states in the US. <jupyter_code># map loads when viewed with nbviewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb) # good loans by state good_loans_state = pd.read_csv("output/good-loans-state.csv") # Vol of loans by state. # load map shape (US states) state_geo = os.path.join("data", "us-states.json") # load loan data for each state df = os.path.join("output", "good-loans-state.csv") good_loans = pd.read_csv(df) # initialize map m = folium.Map(location=[37, -102], zoom_start=4) # set parameters for map, add color. folium.Choropleth( geo_data=state_geo, name="choropleth", data=good_loans, columns=["addr_state", "loans_vol"], key_on="feature.id", fill_color="BuGn", fill_opacity=0.8, line_opacity=0.3, legend_name="No of good loans" ).add_to(m) folium.LayerControl().add_to(m) m # # Save to html # m.save("good_loans_map.html")<jupyter_output><empty_output><jupyter_text>Let's look at the distribution of bad loans across US states. The map describes the number of loans. We can see from the figure below that the highest number of bad loans come from California. Next is New York state. This is followed by Texas and Florida.<jupyter_code># map loads when viewed with nbviewer (https://nbviewer.jupyter.org/github/manieshablakey/loan-assessment-model/blob/master/loan-credit.ipynb) # bad loans by state bad_loans_state = pd.read_csv("output/bad-loans-state.csv") # Vol of loans by state. # load map shape (US states) state_geo = os.path.join("data", "us-states.json") # load loan data for each state dl = os.path.join("output", "bad-loans-state.csv") bad_loans = pd.read_csv(dl) # initialize map m = folium.Map(location=[37, -102], zoom_start=4) # set parameters for map, add color. 
folium.Choropleth( geo_data=state_geo, name='choropleth', data=bad_loans, columns=['addr_state', 'loans_vol'], key_on='feature.id', fill_color='RdPu', fill_opacity=0.8, line_opacity=0.3, legend_name='No of bad loans' ).add_to(m) folium.LayerControl().add_to(m) m # # Save to html # m.save("bad_loans_map.html")<jupyter_output><empty_output><jupyter_text>Bulk of good & bad loans both come from 5000 to 25000 dollar category.<jupyter_code># loan amount by loan status ls = pd.read_csv("output/loan-status-amount.csv") ls = ls.reindex(index = [3, 2, 0, 1, 7, 6, 4, 5]) # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(8, 4)) # set height of bar x_grade = ls["loans"].drop_duplicates() y_bad = ls[ls["loan_status"] == "Charged Off"]["total"] y_good = ls[ls["loan_status"] == "Fully Paid"]["total"] # Set position of bar on X axis r1 = np.arange(len(x_grade)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # Make the plot plt.bar(r1, y_bad, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, y_good, color="g", alpha=0.7, width=barWidth, label="Good loans") # Add xticks on the middle of the group bars plt.xlabel("Loan numbers, by loan amount", fontweight="bold") plt.xticks([r + barWidth for r in range(len(x_grade))], ["<$5000", "$5000-25000", "$25000-30000", "$30000-35000"]) # Create legend & Show graphic plt.legend() fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>Looking at the employment lengths of those with good loans and bad loans, we see that the trends are very similar, with the majority in > 10 years of employment across both loan categories.<jupyter_code># employment length by loan type el = pd.read_csv("output/employment-loan.csv") el = el.reindex(index = [10, 0, 2, 3, 4, 5, 6, 7, 8, 9, 1, 21, 11, 13, 14, 15, 16, 17, 18, 19, 20, 12]) # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(8, 4)) # set height of bar x_length = el["emp_length"].drop_duplicates() y_bad = el[el["loan_status"] == "Charged Off"]["number"] y_good = el[el["loan_status"] == "Fully Paid"]["number"] # Set position of bar on X axis r1 = np.arange(len(x_length)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # Make the plot plt.bar(r1, y_bad, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, y_good, color="g", alpha=0.7,width=barWidth, label="Good loans") # Add xticks on the middle of the group bars plt.xlabel("Length of employment (in years)", fontweight="bold") plt.xticks([r + barWidth for r in range(len(x_length))], ["< 1", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10+"]) # Create legend & show graph plt.legend() fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>When we look at income levels by loan time, the patterns are also similar, with the most loans in both loan types given to those with an annual income between 50,000 and 100,000 dollars.<jupyter_code># income by loan type li = pd.read_csv("output/income-loan.csv") li = li.reindex(index = [2, 0, 1, 3, 6, 4, 5, 7]) # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(8,4)) # set height of bar x_length = li["income"].drop_duplicates() y_bad = li[li["loan_status"] == "Charged Off"]["total"] y_good = li[li["loan_status"] == "Fully Paid"]["total"] # set position of bar on X asia r1 = np.arange(len(x_length)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # Make the plot plt.bar(r1, y_bad, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, y_good, color="g", alpha=0.7, width=barWidth, label="Good 
loans") # Add xticks on the middle of the group bars plt.xlabel("Income by loan status", fontweight="bold") plt.xticks([r + barWidth for r in range(len(x_length))], ["< $25000", "25000− 50000", "50000− 100000", "over $100000"]) # Create legend & Show graphic plt.legend() fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>Looking at home ownership, we see that the majority of those with good loans have a mortgage on their homes. The majority of those with bad loans are tied between those with mortgages and renters.<jupyter_code># home ownership by loan type lh = pd.read_csv("output/home-loan.csv") # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(8, 4)) # set height of bar x_length = lh["home_ownership"].drop_duplicates() y_bad = lh[lh["loan_status"] == "Charged Off"]["total"] y_good = lh[lh["loan_status"] == "Fully Paid"]["total"] # Set position of bar on X axis r1 = np.arange(len(x_length)) r2 = [x + barWidth for x in r1] r3 = [x + barWidth for x in r2] # Make the plot plt.bar(r1, y_bad, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, y_good, color="g", alpha=0.7, width=barWidth, label="Good loans") # Add xticks on the middle of the group bars plt.xlabel("Home ownership by loan status", fontweight="bold") plt.xticks([r + barWidth for r in range(len(x_length))], ["MORTGAGE", "OWN", "RENT"]) # Create legend & Show graphic plt.legend() fig.tight_layout() plt.show()<jupyter_output><empty_output><jupyter_text>The trends between installment amounts and loan type are very similar across both categories. Most installments are between $250-$500 for each category.<jupyter_code># installment by loan type inst = pd.read_csv("output/loan-installment.csv") inst = inst.reindex( index = [3, 0, 1, 2, 4, 8, 5, 6, 7, 9]) inst # set width of bar barWidth = 0.3 fig = plt.figure(figsize=(6, 4)) # set height of bar bars1 = [10155, 19631, 10163, 3896, 1403] bars2 = [57603, 88509, 40555, 15140, 5917] # set position of bar on X asia r1 = np.arange(len(bars1)) r2 = [x + barWidth for x in r1] # Make the plot plt.bar(r1, bars1, color="r", alpha=0.7, width=barWidth, label="Bad loans") plt.bar(r2, bars2, color="g", alpha=0.7, width=barWidth, label="Good loans") # Add xticks on the middle of the group bars plt.xlabel("Installment by loan status ($)", fontsize=15) plt.xticks([r + barWidth for r in range(len(bars1))], ["< 250", "250− 500", "500− 750", "750- 1000", "over 1000"], rotation=90) # Create legend & Show graphic plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>### Section 3: Features selection In this section, we look at all columns in the dataset to determine if the features will be relevant/useful to predict loan status outcome. Those that are not useful or contain information from the future (after loans have been assigned) will be removed from the dataset.<jupyter_code>df = pd.read_csv("data/loan.csv", low_memory=False) half_count = len(df) / 2 # Drop columns with > 50% missing values. df = df.dropna(thresh=half_count, axis=1) # Drop the following column which which does not provide useful df = df.drop(["url"], axis=1) # Now, look at first 5 rows of dataset. df.head() # look at shape of data. There are 887,379 rows and 52 columns in the current dataset. df.shape # These are all the column names. # The LCDataDictionary file has been provided in the data folder and gives a description of each column. df.columns<jupyter_output><empty_output><jupyter_text>Each column needs to be examined to determine if it is useful to be kept in the dataset. 
To do this, the columns are divided into groups. For each group, every column will be compared to column dictionary to see what it represents. If they contain information that is not useful or obtained only after loans are assigned, they are removed from the dataset as such features will not contribute to a loan outcome prediction model. <jupyter_code># First 20 columns. df.iloc[:, :20].head()<jupyter_output><empty_output><jupyter_text>From the above, the following columns are removed are the information they provide is not useful/redundant: id, member_id, sub_grade(grade info has been kept),int_rate (grade info has accounted for this), emp_title, issue_d and title (purpose is kept). These columns are also removed as they contain information from after loans are given: funded_amt & funded_amt_inv<jupyter_code># remove the following columns. drop_cols1 = ['id','member_id','funded_amnt','funded_amnt_inv', 'int_rate','sub_grade','emp_title','issue_d'] df = df.drop(drop_cols1,axis=1)<jupyter_output><empty_output><jupyter_text>After the above columns are dropped, we look at the remaining columns. Looking at the table below, zip_code column is dropped as the information given is not useful (we have state names). All other columns below are dropped as they contain future info that is not valuable for a loan outcome prediction model.<jupyter_code># next lot of columns df.iloc[50:60, 19:38].head() # remove the following columns. drop_cols2 = [ 'zip_code','out_prncp','out_prncp_inv', 'total_pymnt','total_pymnt_inv', 'delinq_2yrs', 'initial_list_status'] df = df.drop(drop_cols2, axis=1) drop_cols3 = ['total_rec_prncp','total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt'] df = df.drop(drop_cols3, axis=1)<jupyter_output><empty_output><jupyter_text>Since dataset column numbers changed between the first drop of columns and the second, take a look at the remaining columns to see if there are others that need to be removed.<jupyter_code>df.columns<jupyter_output><empty_output><jupyter_text>Looking at the above columns remaining, we remove the following columns that future information/ info that is not useful.<jupyter_code># additional columns to drop drop_cols4 = ['title', 'inq_last_6mths','next_pymnt_d', 'collections_12_mths_ex_med', 'policy_code', 'application_type', 'acc_now_delinq', 'tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim'] df = df.drop(drop_cols4, axis=1) df.head()<jupyter_output><empty_output><jupyter_text>From the above dataframe, let's look at the unique instances of the payment plan feature<jupyter_code># pymnt_plan unique pyment = df["pymnt_plan"].unique().tolist() print("No of instances of y in pyment_plan: {}".format((df["pymnt_plan"]== "y").sum())) print("No of instances of n in pyment_plan: {}".format((df["pymnt_plan"]== "n").sum()))<jupyter_output>No of instances of y in pyment_plan: 10 No of instances of n in pyment_plan: 887369 <jupyter_text>There are too few instances of y compared to n to make meaningful inferences, in pymnt_plan columns. Therefore, it can be dropped.<jupyter_code>df = df.drop("pymnt_plan", axis=1)<jupyter_output><empty_output><jupyter_text>Take another look at the columns to make sure all are useful/do not contain future info.<jupyter_code>df.columns<jupyter_output><empty_output><jupyter_text>#### Target variable: Loan status The aim of the machine learning models are to predict good/bad loans. Therefore, loan status is the outcome variable. 
From the exploratory analysis section above, we saw there were several categories of loans. For this model, we will only consider the "Fully Paid" (good) loans and "Charged Off" (bad) loans. We filter the dataset to only included these rows,<jupyter_code>df = df[(df["loan_status"] == "Fully Paid") | (df["loan_status"] == "Charged Off")] df["loan_status"].unique().tolist()<jupyter_output><empty_output><jupyter_text>From the features selected from the dataset, look at general statistics of numerical columns.<jupyter_code>df.describe() # save data. df.to_csv("data/updated_loan_data.csv", index=False)<jupyter_output><empty_output><jupyter_text> ### Section 4: Data Cleaning Start by loading the dataset with relevant columns as saved above and take a look at the first 5 rows.<jupyter_code>updated_loans = pd.read_csv("data/updated_loan_data.csv") print(updated_loans.shape) updated_loans.head() # Look at all the data types in the dataset. updated_loans.dtypes # Look at all missing values in the dataset. null_values = updated_loans.isnull().sum() null_values # Drop rows with missing values updated_loans = updated_loans.dropna() updated_loans.isnull().sum()<jupyter_output><empty_output><jupyter_text>#### Categorical variables Take a look at any one row in the dataset to see how object variables are formatted.<jupyter_code>obj_col = updated_loans.select_dtypes(include=["object"]) print(obj_col.iloc[5])<jupyter_output>term 36 months grade E emp_length 9 years home_ownership RENT verification_status Source Verified loan_status Fully Paid purpose car addr_state CA earliest_cr_line Jan-2007 last_credit_pull_d Dec-2014 Name: 5, dtype: object <jupyter_text>Looking at this row, drop date value columns as these are not useful for the model. The columns are earliest_cr_line & last_credit_pull_d<jupyter_code>drop_cols5 = ["earliest_cr_line", "last_credit_pull_d"] updated_loans = updated_loans.drop(drop_cols5, axis=1)<jupyter_output><empty_output><jupyter_text>In order to convert categorical variables into integers, let's first look at the unique instances for each of the categorical variables left.<jupyter_code>term_unique = updated_loans["term"].unique() grade_unique = updated_loans["grade"].unique() emp_length_unique = updated_loans["emp_length"].unique() home_own_unique = updated_loans["home_ownership"].unique() ver_status_unique = updated_loans["verification_status"].unique() loan_unique = updated_loans["loan_status"].unique() purpose_unique = updated_loans["purpose"].unique() add_state_unique = updated_loans["addr_state"].unique() print("The unique instances of loan term are: {}".format(term_unique)) print("The unique instances of loan grades are: {}".format(grade_unique)) print("The unique instances of employment length are: {}".format(emp_length_unique)) print("The unique instances of home ownership are: {}".format(home_own_unique)) print("The unique instances of verification status are: {}".format(ver_status_unique)) print("The unique instances of loan status are: {}".format(loan_unique)) print("The unique instances of loan purpose are: {}".format(purpose_unique)) print("The unique instances of address state are: {}".format(add_state_unique))<jupyter_output>The unique instances of loan term are: [' 36 months' ' 60 months'] The unique instances of loan grades are: ['B' 'C' 'A' 'E' 'F' 'D' 'G'] The unique instances of employment length are: ['10+ years' '< 1 year' '3 years' '9 years' '4 years' '5 years' '1 year' '6 years' '2 years' '7 years' '8 years'] The unique instances of home ownership are: ['RENT' 
'OWN' 'MORTGAGE' 'OTHER' 'NONE' 'ANY'] The unique instances of verification status are: ['Verified' 'Source Verified' 'Not Verified'] The unique instances of loan status are: ['Fully Paid' 'Charged Off'] The unique instances of loan purpose are: ['credit_card' 'car' 'small_business' 'other' 'wedding' 'debt_consolidation' 'home_improvement' 'major_purchase' 'medical' 'moving' 'vacation' 'house' 'renewable_energy' 'educational'] The unique instances of address state are: ['AZ' 'GA' 'IL' 'CA' 'TX' 'VA' 'MO' 'CT' 'UT' 'FL' 'NY' 'PA' 'MN' 'NJ' 'OR' 'KY' 'OH' 'SC' 'RI' 'LA' 'MA' 'WA' 'WI' 'AL' 'NV' 'AK' 'CO' 'MD' 'WV' 'VT' 'MI' 'DC' 'SD' 'NC'[...]<jupyter_text>Now, mapping dictionary is set up to convert each unique instance from the features above to integers.<jupyter_code>mapping_dict = { "term": { " 36 months": 1, " 60 months": 0 }, "grade": { "A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7 }, "loan_status": { "Fully Paid": 1, "Charged Off": 0 }, "emp_length": { "10+ years": 11, "9 years": 10, "8 years": 9, "7 years": 8, "6 years": 7, "5 years": 6, "4 years": 5, "3 years": 4, "2 years": 3, "1 year": 2, "< 1 year": 1, "nan": 0 }, "home_ownership": { "RENT": 1, "OWN": 2, "MORTGAGE": 3, "OTHER": 4, "NONE": 5, "ANY": 6 }, "verification_status": { "Verified": 1, "Source Verified": 2, "Not Verified": 3 }, "purpose": { "credit_card": 1, "car": 2, "small_business": 3, "other": 4, "wedding": 5, "debt_consolidation": 6, "home_improvement": 7, "major_purchase": 8, "medical": 9, "moving": 10, "vacation": 11, "house": 12, "renewable_energy": 13, "educational": 14 }, "addr_state": { 'AZ': 1,'GA': 2, 'IL': 3, 'CA': 4, 'OR': 5, 'NC': 6, 'TX': 7, 'VA': 8, 'MO': 9, 'CT': 10, 'UT': 11, 'FL': 12, 'NY': 13, 'PA': 14, 'MN': 15, 'NJ': 16, 'KY': 17, 'OH': 18, 'SC': 19, 'RI': 20, 'LA': 21, 'MA': 22, 'WA': 23, 'WI': 24, 'AL': 25, 'CO': 26, 'KS': 27, 'NV': 28, 'AK': 29, 'MD': 30, 'WV': 31, 'VT': 32, 'MI': 33, 'DC': 34, 'SD': 35, 'NH': 36, 'AR': 37, 'NM': 38, 'MT': 39, 'HI': 40, 'WY': 41, 'OK': 42, 'DE': 43, 'MS': 44, 'TN': 45, 'IA': 46, 'NE': 47, 'ID': 48, 'IN': 49, 'ME': 50, 'ND': 51} } updated_loans = updated_loans.replace(mapping_dict) updated_loans.head()<jupyter_output><empty_output><jupyter_text>From the data shape below, we see there are now 242,863 rows in the cleaned dataset.In the previous section, we saw that the original dataset containing only "Charged Off" and "Fully Paid" loans contained 252,972 rows & 52 columns. Approx 4percent of rows with missing values were removed.<jupyter_code>print("The data shape of the cleaned dataset is: {}.".format(updated_loans.shape)) updated_loans.head()<jupyter_output>The data shape of the cleaned dataset is: (242863, 17). <jupyter_text>From the heatmap figure below (and with ref to the data disctionary), we see the strongest positive correlation is between loan amount and installment (0.95). This is followed by strong positive correlation between the no. of currently open credit lines and the total no of credit lines (0.67). Some moderately positive correlations are between: * loan amount and total credit revolving balance (0.33) * installment and annual income (0.32) * installment and total credit revolving balance(0.32) * dti(debt to income) and no of open credit lines(0.31) Highest negative correlation is between loan grade and term (-0.47). This is followed by loan amount and term (-0.40). 
Some other moderately strong negative correlations are between: * loan amount and verification status (if income source was verified by lending club or not) (-0.39) * installment and verification status (-0.35)<jupyter_code>plt.figure(figsize=(10, 10)) sns.heatmap(updated_loans.corr(), annot=True, fmt=".2f") plt.title("Heatmap of all cleaned data features correlations"); plt.show() updated_loans.to_csv("data/cleaned_loan_data.csv",index=False)<jupyter_output><empty_output><jupyter_text>### Section 5: Machine Learning Models Two machine models are developed: **1. K-Nearest Neighbors (kNN) model** **2. Logistic Regression model**Begin by defining features and target variable for the models. In this case, **target variable (y**) is the **loan status**. Features variables (X) are all columns in the cleaned dataset (excluding target variable). Then, split the data into train/test sets and look at data shape.<jupyter_code>cleaned_data = pd.read_csv("data/cleaned_loan_data.csv") X = cleaned_data.loc[:, cleaned_data.columns != "loan_status"] y = cleaned_data.loc[:, "loan_status"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape)<jupyter_output>(194290, 16) (48573, 16) (194290,) (48573,) <jupyter_text>### KNN models We will look at 3 different KNN models: 1. KNN(n=1) 2. KNN(n=10) 3. KNN model with the best n value. **1. KNN (n=1) model.**We see that KNN(n=1) model has a 72% accuracy of correctly identifying new loans as good or bad loans. Index for nearest neighbour of observations are also listed.<jupyter_code># instantiate the estimator knn = KNeighborsClassifier(n_neighbors=1) # fit the model knn.fit(X_train, y_train) # make a prediction y_pred = knn.predict(X_test) print("Prediction for KNN(n=1) model is: {}.".format(y_pred)) # model score knn_score1 = metrics.accuracy_score(y_test, y_pred) print("Accuracy score using KNN(n=1): %.2f%%" % (knn_score1 * 100.0)) # create 2 new observations and use these to make predictions with the model. obs1 = np.array( [8000, 1, 180, 3, 7, 2, 45000, 2, 1, 10, 11.3, 5, 0, 6843, 35, 27]) obs1 = np.reshape(obs1, (1, -1)) obs2 = np.array([2600, 0, 190, 2, 4, 1, 27600, 3, 11, 5, 4, 6, 0, 1847.3, 23, 12.0]) obs2 = np.reshape(obs2, (1, -1)) print("The outcome prediction for 1st new loan observation using KNN(n=1) is: {}.".format(knn.predict(obs1))) print("The nearest neighbor for 1st new loan observation using KNN(n=1) is at: {}.".format(knn.kneighbors(obs1)[1])) print("The outcome prediction for 2nd new loan observation using KNN(n=1) is: {}.".format(knn.predict(obs2))) print("The nearest neighbor for 2nd new loan observation using KNN(n=1) is at: {}.".format(knn.kneighbors(obs2)[1]))<jupyter_output>Prediction for KNN(n=1) model is: [1 1 1 ... 1 1 0]. Accuracy score using KNN(n=1): 71.93% The outcome prediction for 1st new loan observation using KNN(n=1) is: [1]. The nearest neighbor for 1st new loan observation using KNN(n=1) is at: [[73873]]. The outcome prediction for 2nd new loan observation using KNN(n=1) is: [1]. The nearest neighbor for 2nd new loan observation using KNN(n=1) is at: [[76083]]. <jupyter_text>**2. KNN (n=5) model**We see that the KNN(n=15) model has a higher accuracy (80%) of predicting good and bad loans than KNN(n=1).It also predicts both new loan observations as being good loans. 
<jupyter_code>knn = KNeighborsClassifier(n_neighbors=5) knn.fit(X_train,y_train) y_pred = knn.predict(X_test) print("Prediction for KNN(n=10) model is: {}.".format(y_pred)) knn_score5 = metrics.accuracy_score(y_test, y_pred) print("Accuracy score using KNN(n=10): %.2f%%" % (knn_score5 * 100.0)) # Use the same observations created above to make predictions for new loans using KNN(n=10) model. print("The outcome prediction for first new loan observation using KNN(n=5) is: {}.".format(knn.predict(obs1))) print("The 5 nearest neighbors for 1st new loan observation using KNN(n=5) are at: {}.".format(knn.kneighbors(obs1)[1])) print("The outcome prediction for second new loan observation using KNN(n=5) is: {}.".format(knn.predict(obs2))) print("The 5 nearest neighbors for 2nd new loan observation using KNN(n=5) are at: {}.".format(knn.kneighbors(obs2)[1]))<jupyter_output>Prediction for KNN(n=10) model is: [1 1 1 ... 1 1 1]. Accuracy score using KNN(n=10): 79.68% The outcome prediction for first new loan observation using KNN(n=5) is: [1]. The 5 nearest neighbors for 1st new loan observation using KNN(n=5) are at: [[ 73873 31070 184436 140641 51238]]. The outcome prediction for second new loan observation using KNN(n=5) is: [1]. The 5 nearest neighbors for 2nd new loan observation using KNN(n=5) are at: [[76083 9433 95894 48337 55575]]. <jupyter_text>#### Plot all accuracy scores for KNN models, for k values ranging from 1 to 15. Highest accuracy score is obtained when n=11.<jupyter_code>k_range = range(1, 15) scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) knn_score_total = metrics.accuracy_score(y_test, y_pred) scores.append(knn_score_total) plt.plot(k_range, scores) plt.xlabel("Value of n for KNN") plt.ylabel("Testing accuracy")<jupyter_output><empty_output><jupyter_text>**3. KNN (n=13) model**Based of the graph above, highest accuracy is when n=13. We created a KNN (n=13) model and calculate its accuracy score. Predictions are also made using the same 2 new observations created above.This model is a slightly higher accuracy rate (82%) than KNN(n=5), of correctly identifying whether new loans are good or bad. It also predicts both new observations are being good loans. <jupyter_code>knn = KNeighborsClassifier(n_neighbors=13) knn.fit(X_train,y_train) y_pred = knn.predict(X_test) print("Prediction for KNN(n=13) model is: {}.".format(y_pred)) knn_score11 = metrics.accuracy_score(y_test, y_pred) print("Accuracy score using KNN(n=13): %.2f%%" % (knn_score11 * 100.0)) # Use the same observations created above to make predictions for new loans using KNN(n=10) model. print("The outcome prediction for first loan observation using KNN(n=13) is: {}.".format(knn.predict(obs1))) print("The 13 nearest neighbors for 1st new loan observation using KNN(n=13) are at: {}.".format(knn.kneighbors(obs1)[1])) print("The outcome prediction for second loan observation using KNN(n=13) is: {}.".format(knn.predict(obs2))) print("The 13 nearest neighbors for 2nd new loan observation using KNN(n=13) are at: {}.".format(knn.kneighbors(obs2)[1])) # compare actual values to those predicted by the KNN(n=13) model. df_knn = pd.DataFrame({"Actual": y_test, "Predicted": y_pred}) df_knn.sample(n=10)<jupyter_output><empty_output><jupyter_text>**Confusion matrix for KNN model** From the figures below, we can see that this is a biased model. It has a very high sensitivity score, showing that it's able to predict good loans correctly 99% of the time. 
However, it is likely to predict bad loans correctly Model has a 18% chance of misclassifying loans as good or bad. Loans predicted as being good loans have approx 83% chance of being good loans.<jupyter_code>cm = metrics.confusion_matrix(y_test, y_pred) print("Confusion matrix for the KNN(n=11):") print(cm) TP = cm[1, 1] TN = cm[0, 0] FP = cm[0, 1] FN = cm[1, 0] sensitivity = TP / float(TP + FN) print("Sensitivity: %.2f%%" % (sensitivity * 100)) precision = TP / float(TP + FP) print("Precision: %.2f%%" % (precision * 100)) error = (FP + FN) / (TP + TN + FP + FN) print("Error: %.2f%%" % (error * 100)) specificity = TN / float(TN + FP) print("Specificity: %.2f%%" % (specificity * 100))<jupyter_output>Confusion matrix for the KNN(n=11): [[ 159 8374] [ 434 39606]] Sensitivity: 98.92% Precision: 82.55% Error: 18.13% Specificity: 1.86% <jupyter_text>**ROC curve and AUC score for KNN** From the close proximity of the ROC curve to the diagonal line and from the AUC score, it can be interpreted that test accuracy of this model is only a little better that chance.<jupyter_code># ROC curve y_pred_proba = knn.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba) plt.plot([0,1],[0,1],'k--') plt.plot(fpr,tpr, label='Knn') plt.xlabel('fpr') plt.ylabel('tpr') plt.title('Knn(n_neighbors=13) ROC curve') plt.show() # Area under ROC curve roc_knn = roc_auc_score(y_test,y_pred_proba) print("Area under curve score for KNN(n=13)model is: %0.2f%%" % (roc_knn *100))<jupyter_output><empty_output><jupyter_text>### Logistic Regression model We see that the logistic regression model a marginally higher accuracy of making correct predictions of loan status, compared to that of the KNN model with the best accuracy score (n=13). It also predicts both new loan observations as being good loans.<jupyter_code># instantiate logistic regression. logreg = LogisticRegression() # fit logistic regression model to the training set. logreg.fit(X_train, y_train) # to predict the test set results. y_pred_class = logreg.predict(X_test) print("Prediction for the logistic regression model is: {}".format(y_pred_class)) # model score lm_accuracy = metrics.accuracy_score(y_test, y_pred_class) print("Accuracy score using Logistic Regression: %.2f%%" % (lm_accuracy * 100.0)) # Use the same observations created above for KNN models to make predictions for new loans using the logreg model. print("The outcome prediction for first loan observation using logreg model is: {}.".format(logreg.predict(obs1))) print("The outcome prediction for second loan observation using logreg model is: {}.".format(logreg.predict(obs2))) # compare actual values to those predicted by the KNN(n=13) model. df_log = pd.DataFrame({"Actual": y_test, "Predicted": y_pred_class}) df_log.sample(n=10)<jupyter_output><empty_output><jupyter_text>**Confusion matrix for logistic regression model** From the figures below, we can see that this is a biased model. It has a very high sensitivity score, showing that it's able to predict good loans correctly 99% of the time. However, it is likely to predict bad loans correctly Model has a 18% chance of misclassifying loans as good or bad. 
Loans predicted as being good loans have approx 83% chance of being good loans.<jupyter_code>cm_log = metrics.confusion_matrix(y_test, y_pred_class) print("Confusion matrix for Logistic Regression model:") print(cm_log) TP = cm_log[1, 1] TN = cm_log[0, 0] FP = cm_log[0, 1] FN = cm_log[1, 0] sensitivity = TP / float(TP + FN) print("Sensitivity: %.2f%%" % (sensitivity * 100)) precision = TP / float(TP + FP) print("Precision: %.2f%%" % (precision * 100)) error = (FP + FN) / (TP + TN + FP + FN) print("Error: %.2f%%" % (error * 100)) specificity = TN / float(TN + FP) print("Specificity: %.2f%%" % (specificity * 100))<jupyter_output>Confusion matrix for Logistic Regression model: [[ 6 8527] [ 12 40028]] Sensitivity: 99.97% Precision: 82.44% Error: 17.58% Specificity: 0.07% <jupyter_text>**ROC curve and AUC score for logistic regression** Test accuracy of this model at 58.6%, is a little higher than that for KNN(n=13). The logistic regression model is the slightly better model of the two models.<jupyter_code># ROC curve y_pred_proba_log = logreg.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba_log) plt.plot([0,1],[0,1],'k--') plt.plot(fpr,tpr, label='Logistic Regression') plt.xlabel('fpr') plt.ylabel('tpr') plt.title('Logistic Regression ROC curve') plt.show() # Area under ROC curve roc_log = roc_auc_score(y_test,y_pred_proba_log) print("Area under curve score for logistic regression model is: %0.2f%%" % (roc_log *100))<jupyter_output><empty_output>
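<jupyter_text>The same confusion-matrix arithmetic is repeated above for each classifier. As an illustration only (this helper is not part of the original notebook and its name is made up), the calculations can be wrapped in a single function so any fitted model is summarised the same way, using the same scikit-learn metrics module already used above.<jupyter_code># Hypothetical helper: summarise the confusion-matrix metrics computed
# repeatedly above for any fitted binary classifier.
from sklearn import metrics

def summarise_classifier(clf, X_test, y_test):
    """Accuracy, sensitivity, precision, error and specificity for a fitted model."""
    y_pred = clf.predict(X_test)
    cm = metrics.confusion_matrix(y_test, y_pred)
    TN, FP, FN, TP = cm.ravel()  # sklearn layout: [[TN, FP], [FN, TP]]
    return {
        "accuracy": metrics.accuracy_score(y_test, y_pred),
        "sensitivity": TP / float(TP + FN),
        "precision": TP / float(TP + FP),
        "error": (FP + FN) / float(TP + TN + FP + FN),
        "specificity": TN / float(TN + FP),
    }

# Example usage with the models fitted above:
# print(summarise_classifier(knn, X_test, y_test))
# print(summarise_classifier(logreg, X_test, y_test))<jupyter_output><empty_output>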
no_license | /loan-credit.ipynb | manieshablakey/loan-assessment-model | 47
<jupyter_start><jupyter_text>## Gender comparison of the Condition class<jupyter_code>data.iloc[:23]["gender"].value_counts().plot.barh(); plt.title("Cinsiyet karşılaştırma") plt.xlabel("Veri sayısı") plt.ylabel("cinsiyetler (1 k, 2 e)") data.iloc[:23]["gender"].value_counts().plot.pie();<jupyter_output><empty_output><jupyter_text>## Gender comparison of the Control group<jupyter_code>data.iloc[23:]["gender"].value_counts().plot.barh(); plt.title("Cinsiyet karşılaştırma") plt.xlabel("Veri sayısı") plt.ylabel("cinsiyetler (1 k, 2 e)") data.iloc[23:]["gender"].value_counts().plot.pie();<jupyter_output><empty_output><jupyter_text>## Subtract the two treatment scores from one another and look at the effect<jupyter_code>data["madrs3"]=data["madrs1"]-data["madrs2"]<jupyter_output><empty_output><jupyter_text>## Does the number of days affect the resulting scores?<jupyter_code>sbn.scatterplot(x="madrs3",y="days",data=data)<jupyter_output><empty_output><jupyter_text>#### As the number of days increases, the treatment can be said to give positive results ## Does gender play a role in the treatment process?<jupyter_code>sex=["man" if i==2 else "woman" for i in data.gender] data["sex"]=sex sbn.scatterplot(x="madrs3",y="days",hue="sex",data=data) sbn.barplot(x="days",y="madrs3",hue="sex",data=data); plt.legend(loc=2)<jupyter_output><empty_output><jupyter_text>## Incidence of the illness by age group<jupyter_code>plt.plot(data["age"].unique(),data.groupby("age")["gender"].count(),"b-*") plt.title("Bütün veri seti sonucu oluşan grafik") plt.xlabel("yaş aralıkları") plt.ylabel("hastalık görülme sayısı")<jupyter_output><empty_output><jupyter_text>##### Separate analysis of the condition and control groups<jupyter_code>plt.plot(data.iloc[:23]["age"].unique(),data.iloc[:23].groupby("age")["gender"].count(),"g-*") plt.title("Condition grubu") plt.xlabel("yaş aralıkları") plt.ylabel("hastalık görülme sayısı") plt.plot(data.iloc[23:]["age"].unique(),data.iloc[23:].groupby("age")["gender"].count(),"r-*") plt.title("Control grubu") plt.xlabel("yaş aralıkları") plt.ylabel("hastalık görülme sayısı")<jupyter_output><empty_output><jupyter_text>## Distribution of age groups by gender<jupyter_code>sbn.barplot(x="age",y=data.age.index,hue="sex",data=data); plt.xlabel("yaş aralığı") plt.title("Yaş aralıklarının cinsiyetlere göre dağılımı")<jupyter_output><empty_output><jupyter_text>## Correlation of the madrs3 variable with the other data (numerical and graphical view)<jupyter_code>data.corr()["madrs3"] data.corr()["madrs3"].sort_values().plot.barh(); ## okul <jupyter_output><empty_output><jupyter_text>## Education-level distribution of patients in the condition group<jupyter_code>data.iloc[:23]["edu"].value_counts()<jupyter_output><empty_output><jupyter_text>## Is the illness proportional to education level?<jupyter_code>sbn.catplot(x="edu",y="madrs3",data=data);<jupyter_output><empty_output><jupyter_text>#### If we add gender as a third dimension, is there a relationship between education level and gender?<jupyter_code>sbn.catplot(x="edu",y="madrs3",hue="sex",data=data); ## data.head() data["cat_afftype"]=data["afftype"] counter=0 for i in data[["afftype"]].values: if(i==1): data["cat_afftype"].iloc[counter]="bipolarr" elif(i==2): data["cat_afftype"].iloc[counter]="unipolar_depresif" elif(i==3): data["cat_afftype"].iloc[counter]="bipolar" counter+=1 data.head() data.cat_afftype=data.cat_afftype.astype(pd.CategoricalDtype()) data.info() data.cat_afftype.value_counts() data["afftype"].value_counts() sbn.catplot(x="cat_afftype",y="madrs3",data=data,hue="sex"); sbn.violinplot(x="sex",y="madrs3",data=data) sbn.barplot(x="cat_afftype",y="madrs3",hue="sex",data=data) data.head()<jupyter_output><empty_output><jupyter_text># Do employment and marital status affect the outcome of depression treatment?<jupyter_code>sbn.barplot(x="work",y="madrs3",hue="marriage",data=data) plt.legend(loc=4)<jupyter_output><empty_output><jupyter_text>## Do age groups and marital status affect the outcome of depression treatment?<jupyter_code>sbn.barplot(x="age",y="madrs3",hue="marriage",data=data) plt.legend(loc=4) data["age"]=data["age"].astype(pd.CategoricalDtype(categories=['20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69'],ordered=True)) data.age.iloc[:5] liste=[] for i in data.age.cat.codes: if(0<=i<3): liste.append(0) elif(i<5): liste.append(1) elif(i<7): liste.append(2) else: liste.append(3) data["new_age"]=liste<jupyter_output><empty_output><jupyter_text>## Categorize the ages and examine the resulting distribution and the effect of gender on it<jupyter_code>plt.figure(figsize=(12,6)) plt.subplot(1,2,1) sbn.barplot(x="new_age",y="madrs3",hue="marriage",data=data) plt.subplot(1,2,2) sbn.barplot(x="new_age",y="madrs3",hue="sex",data=data) plt.legend(loc=4)<jupyter_output><empty_output><jupyter_text>## Does outpatient treatment give better results than inpatient treatment?<jupyter_code>sbn.boxplot(x="inpatient",y="madrs3",data=data) plt.grid()<jupyter_output><empty_output><jupyter_text>## Does this distribution change by gender?<jupyter_code>sbn.boxplot(hue="sex",x="inpatient",y="madrs3",data=data) plt.grid()<jupyter_output><empty_output><jupyter_text>## Examining the effect of education level on the treatment type<jupyter_code>sbn.boxplot(hue="edu",x="inpatient",y="madrs3",data=data) plt.grid()<jupyter_output><empty_output><jupyter_text>## Distribution of treatment types<jupyter_code>data["inpatient"].value_counts().plot.pie()<jupyter_output><empty_output><jupyter_text>## Distribution of education level across treatment types<jupyter_code>sbn.countplot(x="inpatient",hue="edu",data=data)<jupyter_output><empty_output><jupyter_text>## Examining patients with a positive treatment outcome<jupyter_code>poz_result=data[data.madrs3>0] sta_result=data[data.madrs3==0] neg_result=data[data.madrs3<0] poz_result plt.figure(figsize=(12,6)) plt.subplot(1,2,1) sbn.barplot(x="new_age",y="madrs3",data=data); plt.title("kategorik yaş sınıflandırmasının dağılımı") plt.subplot(1,2,2) plt.title("cinsiyetler üzerine dağılım") sbn.barplot(x="new_age",y="madrs3",hue="sex",data=data); plt.figure(figsize=(12,6)) plt.subplot(1,2,1) plt.title("evliliğin üzerine dağılım") sbn.barplot(x="new_age",y="madrs3",hue="marriage",data=data); plt.subplot(1,2,2) plt.title("çalışma stilinin veri üzerine dağılım") sbn.barplot(x="new_age",y="madrs3",hue="work",data=data);<jupyter_output><empty_output><jupyter_text>## Examining patients who remained stable<jupyter_code>sta_result<jupyter_output><empty_output><jupyter_text>## Examining patients with a negative outcome<jupyter_code>neg_result<jupyter_output><empty_output>
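<jupyter_text>Most of the comparisons above are read off bar and box plots. As a purely illustrative addition (not part of the original notebook), the same treatment-effect variable madrs3 can also be summarised numerically with a grouped aggregation, assuming the `data` DataFrame and the derived columns used above are available.<jupyter_code># Illustrative numeric summary of madrs3 (madrs1 - madrs2) across the
# groupings plotted above; assumes `data` and the derived columns exist.
def madrs3_summary(df, by):
    """Mean, median and count of madrs3 for each level of a grouping column."""
    return df.groupby(by)["madrs3"].agg(["mean", "median", "count"])

for col in ["sex", "new_age", "marriage", "inpatient"]:
    print("--- madrs3 by {} ---".format(col))
    print(madrs3_summary(data, col))<jupyter_output><empty_output>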
no_license | /Depresyon Analizi.ipynb | omermacitt/Patika_ | 23
<jupyter_start><jupyter_text><jupyter_code># ! to change to command line environment !pip install bing-image-downloader ! mkdir images from bing_image_downloader import downloader downloader.download('monkey animal', limit=20, output_dir='images') downloader.download('pigeon bird', limit=20, output_dir='images') downloader.download('butterfly insect', limit=20, output_dir='images') import os import matplotlib.pyplot as plt import numpy as np from skimage.io import imread from skimage.transform import resize DATA = '/content/images' category = ['monkey animal','pigeon bird','butterfly insect'] flat_data =[] target = [] for i in category: path = os.path.join(DATA,i) print(path) for img in os.listdir(path): img_array = imread(os.path.join(path,img)) #showing images # print(img_array.shape) img_resized = resize(img_array,(150,150,3)) # 0-255 values will be normalised (0-1) flat_data.append(img_resized.flatten()) target.append(i) flat_data #os.listdir(path) #target plt.imshow(img_resized) flat_data[0] #imread('/content/images/monkey animal/Image_1.jpg') plt.imshow? import pandas as pd df = pd.DataFrame(flat_data) df df['Target'] = target x = df.drop('Target',axis=1).values y = df['Target'].values from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y, random_state = 0) x_test.shape from sklearn.svm import SVC model = SVC() from sklearn.model_selection import GridSearchCV param_grid = {'C': [1,10,100,1000],'kernel': ['rbf','linear','poly'],'gamma': [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] } grid = GridSearchCV(SVC(),param_grid) grid = grid.fit(x_train,y_train) grid.best_score_ grid.best_params_ pd.DataFrame(grid.cv_results_) model = SVC(kernel='rbf') model.fit(x_train,y_train) y_pred = model.predict(x_test) from sklearn.metrics import accuracy_score, confusion_matrix, classification_report accuracy_score(y_pred,y_test) from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC model = SVC() model.fit(x_train,y_train) y_pred = model.predict(x_test) accuracy_score(y_pred,y_test) from sklearn.preprocessing import StandardScaler stand = StandardScaler() x_train = stand.fit_transform(x_train) x_test = stand.transform(x_test) model = LogisticRegression() model.fit(x_train,y_train) y_pred = model.predict(x_test) accuracy_score(y_pred,y_test) y_test[0] y_pred[0] img = imread('/content/test1.jpg') plt.imshow(img) x_test[0] img = resize(img,(150,150,3)) img_d = img.flatten() img_d = img_d.reshape(1,-1) img_d model.predict(img_d) confusion_matrix(y_pred,y_test) print(classification_report(y_pred,y_test)) <jupyter_output><empty_output>
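<jupyter_text>The single-image prediction at the end repeats the resize/flatten/reshape steps by hand. As an illustration only (this wrapper and its name are not part of the original notebook), the preprocessing can be collected into one helper so any of the fitted classifiers can be applied to a new image file, using the same skimage utilities imported above.<jupyter_code># Hypothetical convenience wrapper: preprocess a single image exactly as the
# training data was preprocessed, then classify it with a fitted estimator.
from skimage.io import imread
from skimage.transform import resize

def predict_image(clf, image_path, size=(150, 150, 3)):
    """Read an image, resize and flatten it like the training data, and predict its class."""
    img = imread(image_path)
    img = resize(img, size)                  # resize also rescales pixel values to [0, 1]
    features = img.flatten().reshape(1, -1)  # one sample with 150*150*3 features
    return clf.predict(features)[0]

# Example usage with the classifier fitted above (path as used earlier):
# print(predict_image(model, '/content/test1.jpg'))<jupyter_output><empty_output>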
no_license | /Minor_project.ipynb | Navya-89/Mini-Project | 1
<jupyter_start><jupyter_text># Starbucks Capstone Challenge ### Introduction This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set. Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products. Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement. You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ### Example To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer. However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. ### Cleaning This makes data cleaning especially important and tricky. You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. ### Final Advice Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. 
You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A).# Data Sets The data is contained in three files: * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.) * profile.json - demographic data for each customer * transcript.json - records for transactions, offers received, offers viewed, and offers completed Here is the schema and explanation of each variable in the files: **portfolio.json** * id (string) - offer id * offer_type (string) - type of offer ie BOGO, discount, informational * difficulty (int) - minimum required spend to complete an offer * reward (int) - reward given for completing an offer * duration (int) - time for offer to be open, in days * channels (list of strings) **profile.json** * age (int) - age of the customer * became_member_on (int) - date when customer created an app account * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F) * id (str) - customer id * income (float) - customer's income **transcript.json** * event (str) - record description (ie transaction, offer received, offer viewed, etc.) * person (str) - customer id * time (int) - time in hours since start of test. The data begins at time t=0 * value - (dict of strings) - either an offer id or transaction amount depending on the record **Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal: Then you will want to run the above command: Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.<jupyter_code>import pandas as pd import numpy as np import math import json from matplotlib import pyplot as plt from matplotlib.pyplot import figure %matplotlib inline # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True)<jupyter_output><empty_output><jupyter_text>## Data Cleaning & Preprocessing<jupyter_code># View the portfolio table for any issues portfolio<jupyter_output><empty_output><jupyter_text>The portfolio data is all visible above with no obvious issues to resolve.<jupyter_code># Determine how many records there are in the profile data profile.shape<jupyter_output><empty_output><jupyter_text>The profile dataset is larger and will require some more tools to clean<jupyter_code># check for missing values profile.isna().sum() # check for duplicates profile.duplicated().sum()<jupyter_output><empty_output><jupyter_text>No duplicates but 2175 missing income values. 
These will be dropped because the income will be used as a predictor in the model for this dataset and imputing the income based on the other data available would not be useful to training the model.<jupyter_code># drop nulls and check profile = profile.dropna() profile.isna().sum() # check the new number of records profile.shape # check for any customers under 18 profile[profile['age']<18].agg('count') # check for any customers over 90 profile[profile['age']>90].agg('count') # get the frequency distribution of the people over 90 profile[profile['age']>90].groupby(['age']).agg('count')<jupyter_output><empty_output><jupyter_text>There are a lot of customers over 90 but given the dataset has 17000 customers, 219 of them being over 90 years old is not unrealistic. Nor is it unrealistic that 5 people could be 101 years old. This information will be retained for the model <jupyter_code># get number of records in the transcript data table transcript.shape # extract the value column which has objects in each row values_df = transcript['value'] # convert the json objects in each row to a PD series values_df = values_df.apply(pd.Series) # merge the new values series to the existing table and drop the original value column transcript_clean = pd.concat([transcript.drop('value', axis=1),values_df],axis=1) # check the result transcript_clean # check how many null items are in offer_id transcript_clean['offer_id'].isna().sum() # the recieved offers use a different column name, offer id without the underscore. # fill the nulls from the offer_id column with the values from offer id to merge into one row transcript_clean['offer_id'].fillna(transcript_clean['offer id'], inplace = True) # check how many nulls are in offer_id after the fill transcript_clean['offer_id'].isna().sum() # drop the redundant columns transcript_cleaning = transcript_clean.drop(['offer id','amount','reward'], axis=1) # the remaining columns are person, event, offer_id and time. # The remaining nulls are in the offer_id field where the event is transaction, so these rows will be dropped transcript_cleaned = transcript_cleaning.dropna() # checking the final cleaned table transcript_cleaned # subsets from the cleaned table will be created for received, viewed and completed offers below received_df = transcript_cleaned[transcript_cleaned['event']=='offer received'].sort_values(by='person') viewed_df = transcript_cleaned[transcript_cleaned['event']=='offer viewed'].sort_values(by='person') completed_df = transcript_cleaned[transcript_cleaned['event']=='offer completed'].sort_values(by='person') # create a completed_offers field and initialise to 0. This will be the binary classifier completed_df['completed_offers']=0 # create a list of every person from the profile table person_list = [i for i in profile['id'].unique()] len(person_list) # some customers received the same offer more than once. Drop all but the first of each unique received offer completed_df = completed_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person') # as above but for viewed offers viewed_df = viewed_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person') # as above for received offers. 
This also includes informational offers which will not be used for modelling received_df = received_df.sort_values(by='time').drop_duplicates(subset=['person','offer_id']).sort_values(by='person') completed_df # Merge the completed offers and viewed offers dataframes, preserving the completed offers rows combined_df = pd.merge(left=completed_df, right=viewed_df, how='left', on=['person','offer_id']) combined_df # Not all customers and offer combinations are in the viewed offers data, where they are missing # the time will be filled with a large value, 1000 combined_df['time_y'].fillna(1000) # Create a new boolean array to see if the offer completed time is more than or equal to the view time completed_offers = combined_df['time_x']>=combined_df['time_y'] completed_offers # Convert the boolean to 0 and 1 completed_offers.map({False: 0, True:1}) # apply the mapping to the original mapping column combined_df['completed_offers']=completed_offers.map({False: 0, True:1}) combined_df # pivot the table to see each offer, for visualisation of the table only combined_df.pivot(index='person', columns=['offer_id'], values=['completed_offers']) # construct a dictionary of offer type for each ID offer_dict = {} for i in range(len(portfolio)): offer_dict[portfolio['id'].iloc[i]] = portfolio['offer_type'].iloc[i] offer_dict # map the offer type based on the ID, in the combined dataframe combined_df['offer_id'] = combined_df['offer_id'].map(offer_dict) # create an inverted flag of the completed offers, for the sorting step in the next cell combined_df['offer_flag'] = 1-combined_df['completed_offers'] # sort by person, offer ID and offer flag ascending, and drop all duplicate offers. # the offer ascending flag will start with 0, this is where completed_offers is 1, # thus ensuring we dont drop a completed offer is one exists offers_df=combined_df.sort_values(by=['person','offer_id','offer_flag']).drop_duplicates(subset=['person','offer_id']).sort_values(by='person') # the new table has a customer ID with one of each offer (BOGO and discount) # pivot the table, re-insert the person ID as a column and fill NaN classes with 0 pivoted_offers = offers_df.pivot(index='person', columns=['offer_id'], values=['completed_offers']) pivoted_offers['id'] = pivoted_offers.index pivoted_offers = pivoted_offers.fillna(0) # merge the pivoted table with the profile table for the customers we retained. profile_offers_df = pd.merge(left = pivoted_offers, right=profile, how='right', on='id') # check all 14,825 customers are retained with the new columns added profile_offers_df # rename the columns profile_offers_df.columns = ['id','bogo','discount','id2','gender','age','became_member_on','income'] # construct the final dataframe, dropping the redundant columns final_df = profile_offers_df.drop(['id2','gender','became_member_on'],axis=1) # fill the remaining NaN values with 0 (for customers who didnt complete any offers) final_df = final_df.fillna(0) # save the file final_df.to_csv('modelling_data.csv')<jupyter_output><empty_output><jupyter_text>## Analysis The final_df dataframe now has the information we need for modelling. the bogo and discount classes are identified for each person by id, along with their age and income. <jupyter_code># Load the file (if returning to this later) final_df = pd.read_csv('modelling_data.csv') # Plot the freq. 
distribution for customer age figure(figsize=(10,7)) plt.bar(profile.groupby('age').agg('count').index,profile.groupby('age').agg('count')['gender']) plt.xlabel('Age') plt.ylabel('Frequency') plt.title('Customer Age Distribution') # plot the distribution for customer salary figure(figsize=(10,7)) plt.plot(profile.groupby('income').agg('count').index,profile.groupby('income').agg('count')['gender']) plt.xlabel('Salary') plt.ylabel('Frequency') plt.title('Customer Salary Distribution') # Summarise the statistics of each class and the proportion of data in each class bogo1 = final_df[final_df['bogo']==1]['bogo'].agg('count') bogo0 = final_df[final_df['bogo']==0]['bogo'].agg('count') disc1 = final_df[final_df['discount']==1]['discount'].agg('count') disc0 = final_df[final_df['discount']==0]['discount'].agg('count') print('There are %d customers who viewed and completed BOGO and %d customers who did not view to complete the offer.\n\ There are %d customers who viewed and completed discount and %d customers who did not view to complete the offer. \n\ This gives us a fraction of %f and %f of BOGO and discount offers compeleted, respectively' %(bogo1,bogo0,disc1,disc0,(bogo1/(bogo1+bogo0)),(disc1/(disc1+disc0)))) # plot the BOGO classes for all customers using the age and salary figure(figsize=(12,10)) plt.scatter(final_df['age'],final_df['income'],s=5,c=final_df['bogo'],cmap='copper') plt.colorbar() plt.title('Customer BOGO Classes') plt.xlabel('Age') plt.ylabel('Salary') # plot the discount classes using age and salary figure(figsize=(12,10)) plt.scatter(final_df['age'],final_df['income'],s=5,c=final_df['discount'],cmap='winter') plt.colorbar() plt.title('Customer Discount Classes') plt.xlabel('Age') plt.ylabel('Salary')<jupyter_output><empty_output><jupyter_text>## Modelling<jupyter_code>from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from sklearn.preprocessing import StandardScaler # define the X and Y data from the final dataframe XData = final_df.drop(['id','bogo','discount'], axis=1) #XData['gender']=XData['gender'].map({'F':0,'M':1,'O':2}) YData = final_df.drop(['id','age','income'],axis=1) # Scale the X data so the salary magnitude doesn't over-influence the fitting scaler = StandardScaler() scaler.fit(XData) Xscaled = scaler.transform(XData) # split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(Xscaled, YData, test_size=0.33, random_state=42) # apply a logistic regression model for both offer types, then get the predictions and print the classification report LRmodelBOGO = LogisticRegression() LRmodelBOGO.fit(X_train,y_train['bogo']) LRmodelDisc = LogisticRegression() LRmodelDisc.fit(X_train,y_train['discount']) y_bogo_pred = LRmodelBOGO.predict(X_test) y_disc_pred = LRmodelDisc.predict(X_test) print(classification_report(y_test['bogo'],y_bogo_pred)) print(classification_report(y_test['discount'],y_bogo_pred)) # plot the training classes in the new scaled X space for BOGO figure(figsize=(12,10)) plt.scatter(X_train[:,0],X_train[:,1],s=5,c=y_train['bogo'],cmap='copper') plt.colorbar() plt.title('Customer BOGO Classes') plt.xlabel('Age') plt.ylabel('Salary') # plot the training classes for the new scaled X space for discounts figure(figsize=(12,10)) 
plt.scatter(X_train[:,0],X_train[:,1],s=5,c=y_train['discount'],cmap='winter') plt.colorbar() plt.title('Customer Discount Classes') plt.xlabel('Age') plt.ylabel('Salary') # plot the actual test classes for BOGO with the scaled test inputs figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_test['bogo'],cmap='copper') plt.colorbar() plt.title('Customer BOGO Actual Classes') plt.xlabel('Age') plt.ylabel('Salary') # plot the predicted BOGO classes from the LR model for the test input figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper') plt.colorbar() plt.title('Customer BOGO Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # Plot the LR predicted classes for discount using the test input figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter') plt.colorbar() plt.title('Customer Discount Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # plot the actual discount classes for the test data figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_test['discount'],cmap='winter') plt.colorbar() plt.title('Customer Discount Actual Classes') plt.xlabel('Age') plt.ylabel('Salary') # fit a random forest classifier with training data, then predict the classes and print report DTmodelBOGO = RandomForestClassifier(n_estimators=200) DTmodelBOGO.fit(X_train,y_train['bogo']) DTmodelDisc = RandomForestClassifier(n_estimators=200) DTmodelDisc.fit(X_train,y_train['discount']) y_bogo_pred = DTmodelBOGO.predict(X_test) y_disc_pred = DTmodelDisc.predict(X_test) print(classification_report(y_test['bogo'],y_bogo_pred)) print(classification_report(y_test['discount'],y_bogo_pred)) # random forest classifier BOGO predictions plot figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper') plt.colorbar() plt.title('Customer BOGO Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # random forest classifier discount predictions plot figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter') plt.colorbar() plt.title('Customer Discount Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # as above but for the SVC models SVCmodelBOGO = SVC(kernel='rbf') SVCmodelBOGO.fit(X_train,y_train['bogo']) SVCmodelDisc = SVC(kernel='rbf') SVCmodelDisc.fit(X_train,y_train['discount']) y_bogo_pred = SVCmodelBOGO.predict(X_test) y_disc_pred = SVCmodelDisc.predict(X_test) print(classification_report(y_test['bogo'],y_bogo_pred)) print(classification_report(y_test['discount'],y_bogo_pred)) # SVC BOGO test data predictions figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper') plt.colorbar() plt.title('Customer BOGO Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # SVC discount test data predictions figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter') plt.colorbar() plt.title('Customer Discount Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # as above but for KNN KNmodelBOGO = KNeighborsClassifier(n_neighbors=3) KNmodelBOGO.fit(X_train,y_train['bogo']) KNmodelDisc = KNeighborsClassifier(n_neighbors=3) KNmodelDisc.fit(X_train,y_train['discount']) y_bogo_pred = KNmodelBOGO.predict(X_test) y_disc_pred = KNmodelDisc.predict(X_test) print(classification_report(y_test['bogo'],y_bogo_pred)) print(classification_report(y_test['discount'],y_bogo_pred)) # KNN bogo test data predictions figure(figsize=(12,10)) 
plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_bogo_pred,cmap='copper') plt.colorbar() plt.title('Customer BOGO Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary') # KNN discount test data predictions figure(figsize=(12,10)) plt.scatter(X_test[:,0],X_test[:,1],s=5,c=y_disc_pred,cmap='winter') plt.colorbar() plt.title('Customer Discount Prediction Classes') plt.xlabel('Age') plt.ylabel('Salary')<jupyter_output><empty_output>
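<jupyter_text>The same fit/predict/report pattern is repeated above for each classifier and each offer type. As an illustrative consolidation only (not part of the original analysis), the loop below fits each classifier separately for the 'bogo' and 'discount' targets and scores it against the matching test labels; it assumes the X_train/X_test/y_train/y_test split created above.<jupyter_code># Illustrative consolidation of the repeated fit/predict/report blocks above.
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report

classifiers = {
    "Logistic Regression": LogisticRegression(),
    "Random Forest": RandomForestClassifier(n_estimators=200),
    "SVC (rbf)": SVC(kernel="rbf"),
    "KNN (k=3)": KNeighborsClassifier(n_neighbors=3),
}

for target in ["bogo", "discount"]:
    for name, clf in classifiers.items():
        clf.fit(X_train, y_train[target])
        y_pred = clf.predict(X_test)
        print("{} / {}: accuracy = {:.3f}".format(name, target, accuracy_score(y_test[target], y_pred)))
        print(classification_report(y_test[target], y_pred))<jupyter_output><empty_output>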
no_license | /Starbucks_Capstone_notebook.ipynb | ravp90/UdacityStarbucksCapstone | 8
<jupyter_start><jupyter_text> Binary classification as a phase separation process Rafael Monteiro Mathematics for advanced Materials - Open Innovation Lab (MathAM-OIL, AIST), Sendai, Japan email : [email protected], [email protected] This is a companion website to the article **Binary classification as a phase separation process**, by [Rafael Monteiro](https://sites.google.com/view/rafaelmonteiro-math/home). A preprint is available on arXiv https://arxiv.org/abs/2009.02467. For the dataset and trainable models, see the data repository at Zenodo. ## A few examples To begin with, I will introduce the model, giving some examples of its use. Let's first import some libraries<jupyter_code>from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.preprocessing import MinMaxScaler import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import matplotlib.pylab as pl import numpy as np import scipy.sparse as sc import sympy import itertools as it ###----------------------------------- ## The modules for this paper are here from binary_phase_separation import * from aux_fnts_for_jupyter_notebooks import * ###----------------------------------- ### In order to open ans save dictionaries try: import cPickle as pickle except ImportError: # python 3.x import pickle import pandas as pd import warnings warnings.filterwarnings(action = "ignore", message = "internal issue") ## Things necessary to do nice plots from matplotlib.ticker import ScalarFormatter, FuncFormatter from matplotlib.transforms import Affine2D from matplotlib import rcParams plt.rc('axes', labelsize = 18) plt.rc('xtick', labelsize = 16) plt.rc('ytick', labelsize = 16) plt.rc('font', size = 20) plt.rc('grid', alpha = 0.6) plt.rc('legend', fontsize = 18) rcParams['font.family'] = "Times New Roman" rcParams['mathtext.fontset'] = 'custom' rcParams['lines.linewidth'] = 2 rcParams['lines.markersize'] = 10 rcParams['lines.markeredgewidth'] = 2<jupyter_output><empty_output><jupyter_text> Nonlinear diffusion equations: some illustrative examples As discussed in Section 1.1 in the paper, the foundations of the model lie on nonlinear diffusion processes, which we briefly illustrate with an example. The heart of the model is the Allen-Cahn equation, a well-known equation in the field of pattern formation. Just to show how the code we have can be used in that case, we will plot the evolution of an initial boundary value problem, with Neumann boundary conditions. Let's see first take a look at the evolution of $$u_0(x) = \frac{1- \sin(\pi(2x - 1))}{2}$$ as an initial condition to the Allen-Cahn equation $$\partial_tu(x, t) = \varepsilon \partial_x^2u(x, t) + u(x, t)(1 − u(x, t))(u(x, t) − \alpha(x)).$$ The parameter $\alpha(\cdot)$ embodies medium heterogeneity. In this case, we choose $\alpha(x) = -2$, when $x <0.5$, and $\alpha(x)$ = 2, when $x \geq 0.5$. Parameters to the model are given below:<jupyter_code>N = 20 x = np.linspace(0, 1, N, endpoint = True) V_0 = 1/2 - 1/2 * np.reshape(np.sin(np.pi * (2 * x - 1)) , (-1,1)) prop = Propagate() dt, eps, Nx, Nt = 0.1, .3, N, 400 dx, ptt_cardnlty, weigths_k_sharing = x[1]-x[0], Nx, Nt<jupyter_output><empty_output><jupyter_text>Then we initialize parameters<jupyter_code>init = Initialize_parameters() param = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weigths_k_sharing)<jupyter_output><empty_output><jupyter_text>If you read the paper you remember that trainable weights are the coefficients of this PDE. 
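For reference, the kind of update that will be run below can be written out directly: an explicit Euler step of the equation above, using a standard three-point Laplacian and homogeneous Neumann boundary conditions. The sketch that follows is only an independent illustration of that discretization, not the library code; the PSBC itself uses scheme (1.7a) of the paper through the prop.forward method.<jupyter_code># Minimal, self-contained sketch (illustration only) of an explicit Euler step for
#   du/dt = eps * d^2u/dx^2 + u * (1 - u) * (u - alpha(x)),
# with homogeneous Neumann (zero-flux) boundary conditions.
import numpy as np

def explicit_euler_step(u, alpha, eps, dt, dx):
    # three-point Laplacian with mirrored ghost points at the boundaries
    u_ext = np.concatenate(([u[1]], u, [u[-2]]))
    lap = (u_ext[2:] - 2.0 * u_ext[1:-1] + u_ext[:-2]) / dx**2
    return u + dt * (eps * lap + u * (1.0 - u) * (u - alpha))

# Example: one step from the initial condition used above.  The small time step
# is chosen purely for stability of this plain scheme; it is not the notebook's dt.
x_demo = np.linspace(0, 1, 20)
u_demo = 0.5 - 0.5 * np.sin(np.pi * (2 * x_demo - 1))
alpha_demo = np.where(x_demo < 0.5, -2.0, 2.0)
u_next = explicit_euler_step(u_demo, alpha_demo, eps=0.3, dt=0.001, dx=x_demo[1] - x_demo[0])<jupyter_output><empty_output><jupyter_text>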
Since the model randomly initialize these coefficients, we will have to readjust them to the value we want. That's what we do in the next part of the code.<jupyter_code>for i in range(param["Nt"]): param["alpha_x_t"][:,i] = -2 * (x < .5) + 2 * (x >= .5)<jupyter_output><empty_output><jupyter_text>which we now run, using the numerical scheme (1.7a) in the paper. As poijnted out there, this is the same as doing a forward propagation: that's why you see the method "prop.forward" in the code below.<jupyter_code>flow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True) time = np.arange(Nt + 1) X, Y = np.meshgrid(x, time) flow = np.squeeze(flow, axis = 1) fig = plt.figure(figsize = (15,8)) ax = fig.add_subplot(111, projection = '3d') color = plt.cm.viridis(np.arange(N)) surf = ax.plot_wireframe(X, Y, flow.T, rstride = 10, cstride = 1,\ alpha = None, antialiased = True, linewidth = 3) ax.view_init(60, -40) plt.draw() surf.set_edgecolors(color) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.tick_params(which = 'both', labelsize = 16) ax.set_xlabel('x', size = 22, labelpad = 30) ax.set_ylabel('t', size = 22, labelpad = 30) ax.set_zlabel('u', size = 22, labelpad = 10) ax.set_zlim([0,1]) plt.show()<jupyter_output><empty_output><jupyter_text>### Propagation with randomly generated coefficientsBut let's go back: the PSBC initialize these coeficients in a randomized fashion. Let's see more or less what it looks like<jupyter_code>N = 1 init = Initialize_parameters() prop = Propagate() dt_vec = np.array([.1,.3,.57,1.5,3,4]) dt, eps, Nx, Nt, dx = .1, 0, N, 20, 1 ptt_cardnlty, weights_k_sharing = Nx, Nt param = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing) for i in range(param["Nt"]): param["alpha_x_t"][:,i] = np.random.uniform(0,1) n_points = 10 V_0 = np.reshape(1/n_points * np.arange(0, n_points + 1), (1, -1)) flow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True) # Setting a random seed for reproduction of graph, set np.random.seed(123) np.random.seed(8123) A = np.reshape(np.random.uniform(0,1, size = Nt), (1,-1)) x_ticks = np.arange(0,21,2) n_points = 10 V_0 = np.reshape(1/n_points*np.arange(0,n_points+1), (1,-1)) fig, ax = plt.subplots( ncols = 1, nrows = 6, figsize = (12,16), sharex = True, gridspec_kw = {'wspace':20} ) m, M = 0,1 for i in range(6): dt = dt_vec[i] param = init.dictionary(N, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing) param["alpha_x_t"] = A flow, waterfall, time = prop.forward(V_0, param, waterfall_save = True , Flow_save = True) m, M = min(m, np.min(flow)), max(M, np.max(flow)) if i <3: ax[i].set_title(r"$\Delta_{\mathrm{t}}^{\mathrm{u}}$ = " + str(dt)) else: ax[i].set_title(r"$\Delta_{\mathrm{t}}^{\mathrm{*}}$ = " + str(dt)) ax[i].set_xlim([-.2,20.2]) ax[i].set_ylabel(r"$U^{[n]}(X;\alpha^{[n-1]})$") ax[i].set_xticks(x_ticks) if i == 5: ax[i].set_xlabel(r"$n$ (layer number)") ax[i].scatter(np.arange(0,param["alpha_x_t"].shape[1]),param["alpha_x_t"].T, marker='o',s = 30) ax[i].plot(flow[0,:].T) ax[i].grid(True, axis = 'x') ## Adjust height for i in range(6): ax[i].set_ylim([m,M]) plt.show()<jupyter_output><empty_output><jupyter_text> Applying the PSBC model to some toy problems As we did in the paper, we shall present the model in a simple toy problem, for illustrative purposes. We shall apply the PSBC later on to the MNIST dataset, where it has been trained. 
### The 1D Rectangular box problem We shall work with a simple 1D model (the rectangular box problem), as the one used in the paper.<jupyter_code>folder = "Statistics/MNIST/" with open(folder + "parameters_MNIST_Neumann.p", 'rb') as fp: data = pickle.load(fp) ### GENERATE DATA gamma, N_data = .2, 2000 X = np.reshape(np.random.uniform(0, 1, N_data),(1, -1)) Y = np.array(X >= gamma, np.int, ndmin = 2) ### SPLIT DATA FOR CROSS VALIDATION A, B, C, D = train_test_split(X.T, Y.T, test_size = 0.2) #### We shall save one individual per column. We need to change that upon reading the csv later on X_train, X_test, Y_train, Y_test = A.T, B.T, C.T, D.T<jupyter_output><empty_output><jupyter_text>In this model, the data has to satisfy features dimension X number of elements in the sample<jupyter_code>np.shape(X_train)<jupyter_output><empty_output><jupyter_text>Now let's define the parameters<jupyter_code>learning_rate = (.1,.08,.93) patience = float("inf") sigma = .1 drop_SGD = 0.95 # See docstring of class "Binary_phase_separation" for further information epochs, dt, dx, eps, Nx, Nt = 600, .1, 1, 0, 1, 20 weights_k_sharing = Nt ptt_cardnlty = 1 batch_size = None subordinate, save_parameter_hist, orthodox_dt, with_phase = True, True, True, True<jupyter_output><empty_output><jupyter_text>and initialize the model<jupyter_code>Init = Initialize_parameters() data = Init.dictionary(Nx, eps, dt, dx, Nt, ptt_cardnlty, weights_k_sharing, sigma = sigma ) data.update({'learning_rate' : learning_rate, 'epochs' : epochs,\ 'subordinate' : subordinate,"patience" : patience,\ 'drop_SGD' : drop_SGD,"orthodox_dt" : orthodox_dt,'with_phase' : with_phase, "batch_size" : batch_size, "save_parameter_hist" : save_parameter_hist })<jupyter_output><empty_output><jupyter_text>We are finally ready to train the model. We do so using the class Binary_Phase_Separation<jupyter_code>Model = Binary_Phase_Separation()<jupyter_output><empty_output><jupyter_text>Of which you can learn more about by typing <jupyter_code>print(Model.__doc__)<jupyter_output> This is the main class of the Phase Separation Binary Classifier (PSBC). With its methods one can, aong other things, train the model and predict classifications (once the model has been trained). <jupyter_text>If the above is not enough you can do this:<jupyter_code>print(help(Model))<jupyter_output>Help on Binary_Phase_Separation in module binary_phase_separation object: class Binary_Phase_Separation(builtins.object) | Binary_Phase_Separation(cost=None, par_U_model=None, par_P_model=None, par_U_wrt_epochs=None, par_P_wrt_epochs=None) | | This is the main class of the Phase Separation Binary Classifier (PSBC). | With its methods one can, aong other things, train the model and | predict classifications (once the model has been trained). | | Methods defined here: | | __init__(self, cost=None, par_U_model=None, par_P_model=None, par_U_wrt_epochs=None, par_P_wrt_epochs=None) | Class initializer. | | Parameters | ---------- | cost : {bool, True}, optional | par_U_model : {dictionary, None}, optional | Dictionary containing initialization parameters for the U component | of the PSBC. | par_P_model : {dictionary, None}, optional | Dictionary containing initialization parameters for the P com[...]<jupyter_text>But this is maybe too much. So, let's say that you just want to know about how to train. You can get information only about that method<jupyter_code>print(Model.train.__doc__)<jupyter_output> 'train' method. This method trains the PSBC model with a given set of parameters and data. 
Parameters ---------- X : numpy.ndarray of size Nx X N_data Matrix with features. Y : numpy.ndarray of size 1 X N_data Matrix with labels. X_test : numpy.ndarray of size Nx X N_data_test Matrix with features. Y_test : numpy.ndarray of size 1 X N_data_test Matrix with labels. learning_rate : float or tuple If Tuple with three elements (a,b,c), these numbers parametrize the learning rate decay. dt : float Mesh grid size of time discretization dx : float Mesh grid size of spatial discretization. layers : int Number o f layers. weights_K_sharing : int Number of successive layers that are sharing their weights. eps : {float, 0}, optional [...]<jupyter_text>The method that we want is train. So, we do <jupyter_code>Model.train( X_train, Y_train, X_train, Y_train, learning_rate, dt, dx, Nt,\ weights_k_sharing, eps = eps, epochs = epochs, \ subordinate = subordinate, with_phase = with_phase,\ drop_SGD = drop_SGD, sigma = sigma,\ orthodox_dt = orthodox_dt, print_every = 300,\ save_parameter_hist = save_parameter_hist )<jupyter_output> epoch : 0 cost 0.11494985702898435 accuracy : 0.70375 epoch : 300 cost 0.022553932287346947 accuracy : 0.9775 <jupyter_text>If you want to take a look at how the cost function behaves over epochs, you can plot it as <jupyter_code>cost_over_epochs = Model.cost x = np.arange(len(cost_over_epochs)) f, ax = plt.subplots(figsize = (15,5)) ax.plot(x, cost_over_epochs, lw = 3) ax.set_title("Cost over epochs") ax.set_ylabel("Cost") ax.set_xlabel("Epochs") ax.grid(True) plt.show()<jupyter_output><empty_output><jupyter_text>And if you want to take a look at the behavior of the set $\mathscr{P}_{\alpha}$ you can also do. Just type<jupyter_code>diameter_history = Model.diameters_hist<jupyter_output><empty_output><jupyter_text>which will give you a dictionary with two keys: "U" and "P"<jupyter_code>diameter_history.keys()<jupyter_output><empty_output><jupyter_text>They concern the behavior of trainable weights for the U variable, and for the P variable. They can be plotted as <jupyter_code>fig, ax = plt.subplots( nrows = 2, figsize = (15,10)) colors = pl.cm.tab20(np.linspace(0,1,11)) ax[0].plot( diameter_history["U"], linestyle = '-', lw = 3,\ label = None, color = colors[0] ) ax[1].plot( diameter_history["P"], linestyle = (0,(3,1,1,1,1,1)), lw = 3,\ label = None, color = colors[1] ) fig.suptitle("Maximum of trainable weights evolution") ax[0].legend(loc = 4, fontsize = 16, ncol = 3) ax[0].set_ylabel(r'Diameter$\left(\mathscr{P}_{\alpha}^{[\mathrm{N_t}-1]}\right)$') ax[0].set_xlabel('Number of iterations') ax[0].grid(True) ax[1].set_ylabel(r'Diameter$\left(\mathscr{P}_{\beta}^{[\mathrm{N_t}-1]}\right)$') ax[1].set_xlabel('Number of iterations') ax[1].grid(True) plt.show()<jupyter_output>No handles with labels found to put in legend. Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. <jupyter_text>This is the typical behavior of these quantities. Note that they remain constant (equal to 1) up to a certain point, and then they grow in a logarithmic shape. Note that the point of departure from the value 1 is different for both variables. That's because they have a separate dynamics, and are allowed to vary independently. Last, since we are saving parameters (setting the variable "save_parameter_hist = True") we can see the behvarior of the accuracy throughout epochs: we are saving all the parameters in the model at each epoch. 
We remark that in applications it is better to set "save_parameter_hist = False" in order to save memory.<jupyter_code>accuracies_fnt = Model.accuracies_hist x = np.arange(len(accuracies_fnt)) ## Plotting f, ax = plt.subplots(figsize = (15,5)) ax.plot(x, accuracies_fnt, lw = 3) f.suptitle("Accuracy over epochs") ax.set_ylabel("Accuracy") ax.set_xlabel("Epochs") ax.grid(True) plt.show()<jupyter_output><empty_output><jupyter_text>Note that the model peaks (reaches its point of highest accuracy) before the final epoch. This natural "deterioration" is what led researchers to design Early Stopping techniques. We can in fact find out what that epoch was by typing<jupyter_code>Model.best_epoch<jupyter_output><empty_output><jupyter_text>The corresponding best accuracy was<jupyter_code>Model.best_accuracy<jupyter_output><empty_output><jupyter_text>that is, 100\% accuracy. If you want to retrieve the model parameters at such an epoch you just need to type<jupyter_code>best_P , best_U = Model.best_par_P_model, Model.best_par_U_model<jupyter_output><empty_output><jupyter_text>which will give the value of the parameters used when the model achieved its best performance.<jupyter_code>number_tests = Model.par_U_model["epochs"] accuracy_train, accuracy_test = [] , [] for j in range(number_tests - 1): _, aux_train, accuracy_train_now =\ Model.predict_and_accuracy( X_train,Y_train, Model.par_U_wrt_epochs[str(j)], Model.par_P_wrt_epochs[str(j)],\ subordinate = subordinate,with_phase = with_phase) _, aux_test, accuracy_test_now =\ Model.predict_and_accuracy( X_test, Y_test, Model.par_U_wrt_epochs[str(j)], Model.par_P_wrt_epochs[str(j)],\ subordinate = subordinate, with_phase = with_phase) # Accuracies accuracy_train.append(accuracy_train_now) accuracy_test.append(accuracy_test_now) x = np.arange(len(accuracy_train)) ## Plotting f, ax = plt.subplots(figsize = (15,8)) plt.plot(x, accuracy_train, lw = 3, label = "Train") plt.plot(x, accuracy_test, lw = 3, label = "Test") f.suptitle("Accuracy over epochs") ax.set_ylabel("Accuracy") ax.set_xlabel("Epochs") ax.legend(loc = 4) ax.grid(True) plt.show()<jupyter_output><empty_output><jupyter_text> The MNIST dataset <jupyter_code>######################################################################### ### READ MNIST DATASET TO PANDAS DATAFRAME AND THEN TO CSV FILE ######################################################################### data_train_MNIST = pd.read_csv('Examples/data_train_normalized_MNIST.csv') data_test_MNIST = pd.read_csv('Examples/data_test_normalized_MNIST.csv') X_train_MNIST = (data_train_MNIST.iloc[:,:-1]).to_numpy() Y_train_MNIST = np.reshape(data_train_MNIST.iloc[:,-1].to_numpy(), (1,-1)) X_test_MNIST = (data_test_MNIST.iloc[:,:-1]).to_numpy() Y_test_MNIST = np.reshape(data_test_MNIST.iloc[:,-1].to_numpy(), (1,-1)) X_train_MNIST, X_test_MNIST = X_train_MNIST.T , X_test_MNIST.T print(X_test_MNIST.shape, Y_test_MNIST.shape)<jupyter_output>(784, 2956) (1, 2956) <jupyter_text>See https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler<jupyter_code>(X_train_MNIST.min(axis = 0)[:5], X_train_MNIST.max(axis = 0)[:5]),\ (X_test_MNIST.min(axis = 0)[:5], X_test_MNIST.max(axis = 0)[:5]) print( "Train, 0:\t",len(np.squeeze(np.where(Y_train_MNIST == 0))[1,:])/Y_train_MNIST.shape[1],\ "\nTrain, 1: \t",len(np.squeeze(np.where(Y_train_MNIST == 1))[1,:])/Y_train_MNIST.shape[1] ) print( "Test, 0:\t",len(np.squeeze(np.where(Y_test_MNIST == 0))[1,:])/Y_test_MNIST.shape[1],\ "\nTest, 1: \t",len(np.squeeze(np.where(Y_test_MNIST
== 1))[1,:])/Y_test_MNIST.shape[1] ) where_0 = np.squeeze(np.where(Y_train_MNIST == 0))[1,:10] where_1 = np.squeeze(np.where(Y_train_MNIST == 1))[1,:10] plt.figure(figsize = (15,10)) pick = np.asarray(where_0) zero_images = np.array([], dtype = np.int64).reshape(28,0) images = [np.reshape(X_train_MNIST[:,pick[image_index]], (28,28)) for image_index in range(10) ] for image in images: zero_images = np.concatenate([zero_images,image], axis = 1) pick = np.asarray(where_1) one_images = np.array([], dtype = np.int64).reshape(28,0) images = [np.reshape(X_train_MNIST[:,pick[image_index]], (28,28)) for image_index in range(10) ] for image in images: one_images = np.concatenate([one_images,image], axis = 1) both = np.concatenate([zero_images,one_images], axis = 0) plt.axis("off") plt.imshow(both, cmap = "binary") plt.show()<jupyter_output><empty_output><jupyter_text>One can also visualize the trainable weights as heatmaps.<jupyter_code>parent_folder = "Examples/" ## Non-subordinate ######################################################## sub_non_sub = {} folder_now = parent_folder + "W1S-NS/simulation1/" with open(folder_now + "Full_model_properties.p", 'rb') as fp: Full_model_properties = pickle.load(fp) sub_non_sub["non" + "best_par_P"] = Full_model_properties["best_par_P_model"] sub_non_sub["non" + "best_par_U"] = Full_model_properties["best_par_U_model"] ## Subordinate ############################################################ parent_folder = "Examples/" folder_now = parent_folder + "W1S-S/simulation1/" with open(folder_now + "Full_model_properties.p", 'rb') as fp: Full_model_properties = pickle.load(fp) sub_non_sub["sub" + "best_par_P"] = Full_model_properties["best_par_P_model"] sub_non_sub["sub" + "best_par_U"] = Full_model_properties["best_par_U_model"] ## W1S-Nt8 ################################################################## parent_folder = "Examples/" folder_now = parent_folder + "W1S-Nt8/simulation1/" with open(folder_now + "Full_model_properties.p", 'rb') as fp: Full_model_properties = pickle.load(fp) sub_non_sub["kfold" + "best_par_P"] = Full_model_properties["best_par_P_model"] sub_non_sub["kfold" + "best_par_U"] = Full_model_properties["best_par_U_model"] import seaborn as sns %matplotlib inline f, ax = plt.subplots(ncols = 3, nrows = 2, figsize = (15,10), constrained_layout = False) list_sub = ("non", "sub", "kfold") list_sub_big = ("Non-subordinate\n(Weights-1-sharing)",\ "Subordinate\n(Weights-1-sharing)",\ "Subordinate,"+ r"$\mathrm{N_t}=8$"+"\n"+r"(Weights-$1$-sharing)") m_1 = np.min([sub_non_sub[list_sub[i] + "best_par_P"]["alpha_x_t"].min() for i in range(3)]) M_1 = np.max([sub_non_sub[list_sub[i] + "best_par_P"]["alpha_x_t"].max() for i in range(3)]) for i in range(3): Nx = sub_non_sub[list_sub[i] + "best_par_P"]["alpha_x_t"].shape[0] sub_non_sub[str(i) + "matrix"] = sub_non_sub[list_sub[i] + "best_par_P"]["alpha_x_t"] sns.heatmap(\ sub_non_sub[str(i) + "matrix"], ax = ax[0,i], vmin = m_1, vmax = M_1, cbar = False,\ cmap = 'inferno' ) pcm = ax[0,i].pcolormesh(sub_non_sub[str(i) + "matrix"]) ax[0,i].set_title(list_sub_big[i], size = 20) f.subplots_adjust(right=0.9) cbar_ax = f.add_axes([.92, .55, .03, .3]) f.colorbar(pcm, cax = cbar_ax) ax[0,0].set_ylabel(r"$W_P^{[\cdot]} = \beta^{[\cdot]}$",rotation=90, size = 18) m_2 = np.min([sub_non_sub[list_sub[i] + "best_par_U"]["alpha_x_t"].min() for i in range(3)]) M_2 = np.max([sub_non_sub[list_sub[i] + "best_par_U"]["alpha_x_t"].max() for i in range(3)]) for i in range(3): Nx = sub_non_sub[list_sub[i] + 
"best_par_U"]["alpha_x_t"].shape[0] M = sub_non_sub[list_sub[i] + "best_par_U"]["alpha_x_t"] sub_non_sub[str(i) + "matrix"] = M sns.heatmap(M, ax = ax[1,i], vmin = m_2, vmax = M_2, cbar = False, cmap = 'inferno') pcM = ax[1,i].pcolormesh(sub_non_sub[str(i) + "matrix"]) cbar_ax2 = f.add_axes([.92, .15, .03, .3]) f.colorbar(pcM, cax = cbar_ax2) ax[1,0].set_ylabel(r"$W_U^{[\cdot]} = \alpha^{[\cdot]}$", rotation = 90, size = 18) plt.show()<jupyter_output><empty_output><jupyter_text> Retrieving some statistics <jupyter_code>parameters_MNIST_nondif, stats_folder_MNIST = {}, "Statistics/MNIST/" with open(stats_folder_MNIST + "parameters_MNIST_nondif.p", 'rb') as fp: parameters_MNIST_nondif = pickle.load(fp) parameters_MNIST_Neumann, stats_folder_MNIST = {}, "Statistics/MNIST/" with open(stats_folder_MNIST + "parameters_MNIST_Neumann.p", 'rb') as fp: parameters_MNIST_Neumann = pickle.load(fp) parameters_MNIST_Periodic, stats_folder_MNIST = {}, "Statistics/MNIST/" with open(stats_folder_MNIST + "parameters_MNIST_Periodic.p", 'rb') as fp: parameters_MNIST_Periodic = pickle.load(fp) help(accuracies)<jupyter_output>Help on function accuracies in module aux_fnts_for_jupyter_notebooks: accuracies(parameters, name, accuracy_type, number_folders=10, number_simulations=10) This function is only used in the jupyter notebook for the MNIST dataset Parameters ---------- parameters : dictionary Dictionary containing summary of data for some PSBC experiments. name : string Name of the keys of the dictionary "parameters" that we are studying, corresponding to a PSBC configuration. accuracy_type : string Either "best_accuracy_train" or "best_accuracy_test". number_folders : {int, 10}, optional Number of folders, where each folder corresponds of one value of the parameter being valued. number_simulations : {int, 10}, optional Number of simulations that were run with the same parameter, for statistical purposes. 
Returns ------- A : matrix Matrix with all the accuracies of type accuracy_type, wher[...]<jupyter_text>For more details about the code in the next plot, see Remark 1 (cell 20) in the jupyter-notebook **Notebook_PSBC_MNIST.ipynb**.<jupyter_code>A_train_1NS, value_of_parameter_varying = accuracies ( parameters_MNIST_nondif, "W1S-NS", "best_accuracy_train") value_of_parameter_varying = value_of_parameter_varying[::-1] average_train_1NS, stdev_train_1NS =\ np.mean(A_train_1NS, axis = 1)[::-1], np.std(A_train_1NS, axis = 1)[::-1] A_train_1S, _ = accuracies ( parameters_MNIST_nondif, "W1S-S", "best_accuracy_train") average_train_1S, stdev_train_1S = np.mean(A_train_1S, axis = 1)[::-1], np.std(A_train_1S, axis = 1)[::-1] A_train_NS, _ = accuracies ( parameters_MNIST_nondif, "WNtS-NS", "best_accuracy_train") average_train_NS, stdev_train_NS = np.mean(A_train_NS, axis = 1)[::-1], np.std(A_train_NS, axis = 1)[::-1] A_train_S, _ = accuracies ( parameters_MNIST_nondif, "WNtS-S", "best_accuracy_train") average_train_S, stdev_train_S = np.mean(A_train_S, axis = 1)[::-1], np.std(A_train_S, axis = 1)[::-1] colors = pl.cm.tab10(np.linspace(0,1,9)) colors_markers = pl.cm.tab10(np.linspace(0,1,9)) # Parameters using during pltting col_NS, mk_NS, alpha_for_all, L, T = 0, 1, 0.6, -3, .008 value_of_parameter_varying = np.asarray(value_of_parameter_varying) fig,ax = plt.subplots( nrows = 2, ncols = 2, figsize = (15,12), sharey='row', sharex = 'col',\ gridspec_kw = {'wspace':0,'hspace':0}, constrained_layout = False ) markers, caps, bars = ax[0,0].errorbar( (1 + T)*value_of_parameter_varying[:L], average_train_1NS[:L], marker = 'o',\ yerr = stdev_train_1NS[:L], errorevery = 1, linestyle = '-',\ label = "Non Sub", color = colors[col_NS+3],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[0,0].errorbar( (1/(1 + T))*value_of_parameter_varying[:L], average_train_1S[:L], marker = 'x',\ yerr = stdev_train_1S[:L], errorevery = 1, linestyle = '-',\ label = "Sub", color = colors[col_NS],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[0,1].errorbar( (1 + T)*value_of_parameter_varying[:L], average_train_NS[:L], marker = 'o',\ yerr = stdev_train_NS[:L], errorevery = 1, linestyle = '-',\ label = "Non Sub", color = colors[col_NS+3],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[0,1].errorbar( (1/(1 + T))*value_of_parameter_varying[:L], average_train_S[:L], marker = 'x',\ yerr = stdev_train_S[:L], errorevery = 1, linestyle = '-',\ label = "Sub", color = colors[col_NS],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] ) [bar.set_alpha(alpha_for_all) for bar in bars] A_test_1NS, _ = accuracies (parameters_MNIST_nondif, "W1S-NS", "best_accuracy_test") average_test_1NS, stdev_test_1NS = np.mean(A_test_1NS, axis = 1)[::-1], np.std(A_test_1NS, axis = 1)[::-1] A_test_1S, _ = accuracies (parameters_MNIST_nondif, "W1S-S", "best_accuracy_test") average_test_1S, stdev_test_1S = np.mean(A_test_1S, axis = 1)[::-1], np.std(A_test_1S, axis = 1)[::-1] A_test_NS, _ = accuracies (parameters_MNIST_nondif, "WNtS-NS", "best_accuracy_test") average_test_NS, stdev_test_NS = np.mean(A_test_NS, axis = 1)[::-1], np.std(A_test_NS, axis = 1)[::-1] A_test_S, _ = accuracies (parameters_MNIST_nondif, "WNtS-S", "best_accuracy_test") average_test_S, stdev_test_S = np.mean(A_test_S, 
axis = 1)[::-1], np.std(A_test_S, axis = 1)[::-1] markers, caps, bars = ax[1,0].errorbar( (1 + T)*value_of_parameter_varying[:L], average_test_1NS[:L], marker = 'o', \ yerr = stdev_test_1NS[:L], errorevery = 1, linestyle = '-',\ label = "Non Sub", color = colors[col_NS+3],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[1,0].errorbar( (1/(1 + T))*value_of_parameter_varying[:L], average_test_1S[:L], marker = 'x',\ yerr = stdev_test_1S[:L], errorevery = 1, linestyle = '-',\ label = "Sub", color = colors[col_NS],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[1,1].errorbar( (1 + T)*value_of_parameter_varying[:L], average_test_NS[:L], marker = 'o', \ yerr = stdev_test_NS[:L], errorevery = 1, linestyle = '-',\ label = "Non-sub", color = colors[col_NS+3],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS-1] ) [bar.set_alpha(alpha_for_all) for bar in bars] markers, caps, bars = ax[1,1].errorbar( (1/(1 + T))*value_of_parameter_varying[:L], average_test_S[:L], marker = 'x',\ yerr = stdev_test_S[:L], errorevery = 1, linestyle = '-',\ label = "Sub", color = colors[col_NS],\ fillstyle = 'none', markeredgecolor = colors_markers[mk_NS+2] ) [bar.set_alpha(alpha_for_all) for bar in bars] for i in [0,1]: for j in [0,1]: ax[i,j].grid(axis = "y") ax[i,j].set_xscale('log') ax[i,j].set_xticks(np.asarray(value_of_parameter_varying[:L])) ax[i,j].tick_params("x") ax[i,j].get_xaxis().set_major_formatter(ScalarFormatter()) ax[i,j].xaxis.set_minor_formatter(plt.matplotlib.ticker.NullFormatter()) ax[0,0].set_title(r"Weights-$1$-sharing", size = 18) ax[0,1].set_title(r"Weights-$\mathrm{N_t}$-sharing", size = 18) ax[0,0].set_ylabel("Accuracy train (average)") ax[1,0].set_ylabel("Accuracy test (average)") ax[1,0].set_xlabel(r"$\mathrm{N_{ptt}}$ (in logarithmic scale)") ax[1,1].set_xlabel(r"$\mathrm{N_{ptt}}$ (in logarithmic scale)") chartBox = ax[1,1].get_position() ax[1,1].set_position([chartBox.x0, chartBox.y0, chartBox.width*0.2, chartBox.height]) ax[1,1].legend(loc = 'upper center', bbox_to_anchor = (0, 0.15), shadow = True, ncol = 4) fig.set_tight_layout({'rect': [0, 0, 1, 0.95]}) fig.suptitle("Non-diffusive PSBC", size = 20) plt.show()<jupyter_output><empty_output><jupyter_text>Or in case you want to see the evolution of the maximum over epochs, for Periodic PSBC with Nt =1<jupyter_code>fig, ax = plt.subplots(nrows = 2, figsize = (15,10)) _, value_of_parameter_varying = accuracies ( parameters_MNIST_Periodic, "Per_W1S-Nt2", "best_accuracy_train", number_folders = 13) colors = pl.cm.tab20(np.linspace(0,1,16)) def test_label_value_of_parameter_varying(x): if x == 0: return "0" j = int(np.ceil(np.log2(x))) return r'$2^{{{0}}}$'.format(j) param = parameters_MNIST_Periodic["Per_W1S-Nt8"] for i in range(1, 14): param_now = param[str(i)] diam_hist_now = param_now['diam_hist'] ax[0].plot(diam_hist_now["U"], linestyle = '-', lw = 3,\ label = str(test_label_value_of_parameter_varying(value_of_parameter_varying[i-1])),\ color=colors[i] ) ax[1].plot(diam_hist_now["P"], linestyle = (0,(3,1,1,1,1,1)), lw = 3,\ label = str(test_label_value_of_parameter_varying(value_of_parameter_varying[i-1])),\ color = colors[i]) ax[0].legend(loc = 2, fontsize = 16, ncol = 3) ax[0].set_ylabel(r'Diameter$\left(\mathscr{P}_{\alpha}^{[\mathrm{N_t}-1]}\right)$') ax[0].set_xlabel('Number of iterations') ax[0].grid(True) ax[0].legend(loc = 2, ncol = 3, title = 
r"$\varepsilon$") plt.rcParams["legend.title_fontsize"] = 20 plt.rcParams["legend.columnspacing"] = .8 ax[1].set_ylabel(r'Diameter$\left(\mathscr{P}_{\beta}^{[\mathrm{N_t}-1]}\right)$') ax[1].set_xlabel('Number of iterations') ax[1].grid(True) plt.show()<jupyter_output>Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. Substituting with a symbol from Computer Modern. <jupyter_text>Or one could draw some confusion matrices. For example, the confusion matrix for a realization of the diffusive PSBC with Neumann BCs, weights-1-sharing, and Nt = 8 is given below<jupyter_code>parent_folder = "Examples/" folder_now = parent_folder + "W1S-Nt8/simulation1/" with open(folder_now + "Full_model_properties.p", 'rb') as fp: Full_model_properties = pickle.load(fp) best_predic_vector_test_now = np.squeeze(Full_model_properties["best_predic_vector_test"]) real_tags = np.squeeze(Y_test_MNIST) from sklearn.metrics import confusion_matrix conf_matrix_example = confusion_matrix(real_tags, best_predic_vector_test_now ) fig, axs = plt.subplots( nrows = 1,ncols = 1, figsize = (10, 10)) cax_test = axs.matshow(conf_matrix_example, cmap = plt.cm.Blues) axs = sns.heatmap(conf_matrix_example/np.sum(conf_matrix_example),\ annot = True, annot_kws={"size": 20},\ fmt='.2%', cmap = 'Blues', ax = axs, cbar = False) plt.yticks(rotation = 0, fontsize = 18) plt.xticks(fontsize = 18) axs.xaxis.tick_top() # x axis on top axs.xaxis.set_label_position('top') axs.set_ylabel("True labels", fontsize = 22) axs.set_xlabel("Predicted labels", fontsize = 22) plt.show()<jupyter_output><empty_output><jupyter_text>### Predicting my own handwritten 0 and 1 So, as we have seen from the first example, this is a predictive model (that we refer to as PSBC) based on a reaction-diffusion equation. In the second example we showed the PSBC on a toy problem. As remarked extensively in the paper, we know that this is in fact quite a particular type of 1D problem on which it performs well; such behavior is not expected in general (see, in particular, Section 3.6 in the paper). To highlight the interplay between high-dimensionality of feature spaces and model compressibility, we have applied the model to the subset "0"-"1" of the MNIST database. To illustrate a bit more of the model's use and also play with the trainable weights, we will now predict the label for our own handwritten numbers. One of the original pictures is given below. In fact, I wrote 6 numbers - 3 zeros, 3 ones - for this notebook. If you read the first papers of LeCun et al. about the MNIST project, there is a description of the way the pictures were taken, so that they look the way they do in cell 36 of this Notebook: the images had to be controlled for centralization, light contrast, etc. This is part of the statistical design, which I tried to follow as closely as possible. I cropped the pictures using [GIMP](https://www.gimp.org), free image-manipulation software: you take a picture, crop it, go to Image, and set it to grayscale. Then you can adjust for light contrast and other things. And that's it. Now, with the cropped, grayscale jpg in hand, you proceed as in the next cell.
I show two examples below.<jupyter_code>from PIL import Image im_array0 = np.asarray(Image.open("figures/my_0.jpg")) im_array1 = np.asarray(Image.open("figures/my_1.jpg")) fig, ax = plt.subplots(1,2) ax[0].imshow(im_array0, cmap='binary') ax[1].imshow(im_array1, cmap='binary') ax[0].axis(False) ax[1].axis(False) plt.show()<jupyter_output><empty_output><jupyter_text>Now we reshape this pictures as a 28 x 28 matrix.<jupyter_code>from PIL import Image def create_MNIST_type_figure(name): """Convert jpg figure to a (28,28) numpy array""" image = Image.open(name).convert('L') image2 = image.resize((28,28)) im2_as_array = 255- np.array(image2, dtype=np.uint8) print("image has shape", im2_as_array.shape) return im2_as_array my_0 = create_MNIST_type_figure("figures/my_0.jpg") my_0_v2 = create_MNIST_type_figure("figures/my_0_v2.jpg") my_0_v3 = create_MNIST_type_figure("figures/my_0_v3.jpg") my_1 = create_MNIST_type_figure("figures/my_1.jpg") my_1_v2 = create_MNIST_type_figure("figures/my_1_v2.jpg") my_1_v3 = create_MNIST_type_figure("figures/my_1_v3.jpg") fig, ax = plt.subplots(1,2) ax[0].imshow(my_0, cmap='binary') ax[1].imshow(my_1, cmap='binary') ax[0].axis(False) ax[1].axis(False) plt.show()<jupyter_output>image has shape (28, 28) image has shape (28, 28) image has shape (28, 28) image has shape (28, 28) image has shape (28, 28) image has shape (28, 28) <jupyter_text>Recall that we need to flatten these matrices,<jupyter_code>my_0_for_psbc = my_0.flatten(order='C') my_0_for_psbc_v2 = my_0_v2.flatten(order='C') my_0_for_psbc_v3 = my_0_v3.flatten(order='C') my_1_for_psbc = my_1.flatten(order='C') my_1_for_psbc_v2 = my_1_v2.flatten(order='C') my_1_for_psbc_v3 = my_1_v3.flatten(order='C')<jupyter_output><empty_output><jupyter_text>and we can then combine all of them as columns in a single matrix.<jupyter_code>combined_handwritten = \ np.c_[my_0_for_psbc, my_0_for_psbc_v2, my_0_for_psbc_v3, my_1_for_psbc, my_1_for_psbc_v2, my_1_for_psbc_v3]<jupyter_output><empty_output><jupyter_text>Now we load a PSBC model <jupyter_code>with open("Examples/W1S-Nt8/simulation1/Full_model_properties.p", 'rb') as fp: load_mnist = pickle.load(fp) psbc_testing = Binary_Phase_Separation() prediction = psbc_testing.predict(combined_handwritten, load_mnist["best_par_U_model"],load_mnist["best_par_P_model"]) print(prediction)<jupyter_output>[0 0 0 0 0 0] <jupyter_text>This seems really bad... but don't be worried! You should not forget: we need to satisfy the normalization conditions!! In fact, we are very far from that:<jupyter_code>np.max(combined_handwritten), np.min(combined_handwritten), combined_handwritten.shape<jupyter_output><empty_output><jupyter_text>So, let's normalize the data<jupyter_code>init_data = Initialize_Data() help(init_data)<jupyter_output>Help on Initialize_Data in module binary_phase_separation object: class Initialize_Data(builtins.object) | This class preprocess the data, normalizing it. | | Methods defined here: | | __init__(self) | Class initializer. No returned value. | | denormalize(self, Z, min_vals, max_vals, sigma=0.2) | 'denormalize' method. | | This method puts the data back to its original scale. | Of the non-normalized data the method uses its minimum value | min_vals, its original maxum value max_vals, and sigma. | The non-normalized data is transformed by | | A = ( 1 / sigma ) * ( Z - .5 + sigma /2) | | and then Z_2 = min_vals + A * (max_vals - min_vals). | | Z_2 is the returned value. | | Parameters | ---------- | | Returns | ------- | Non-normalized data 'A'. 
| | A : numpy.ndarray | | normalize(self, Z, sigma=0.2) | 'normalize' method.[...]<jupyter_text>So, the data get's normalized, but centered. By default, it gets rescaled in the range [0.4,0.6]. What we do then is: (i) we normalize it, then (ii) we add 0.1 to it.<jupyter_code>combined_handwritten_for_psbc, _, _ = init_data.normalize(combined_handwritten) combined_handwritten_for_psbc = 0.1+ combined_handwritten_for_psbc np.max(combined_handwritten_for_psbc), np.min(combined_handwritten_for_psbc) combined_handwritten_for_psbc.shape fig, ax = plt.subplots(1,6,figsize = (15,5)) for i in range(6): ax[i].imshow(np.reshape(combined_handwritten_for_psbc[:,i],(28,28)), cmap = 'binary') ax[i].axis("off") for name in ["W1S-NS", "W1S-S", "WNtS-NS", "WNtS-S",\ "W1S-Nt2", "W1S-Nt4", "W1S-Nt8",\ "WNtS-Nt1","WNtS-Nt2", "WNtS-Nt4", "WNtS-Nt8",\ "Per_W1S-Nt2", "Per_W1S-Nt4", "Per_W1S-Nt8",\ "Per_WNtS-Nt1","Per_WNtS-Nt2", "Per_WNtS-Nt4", "Per_WNtS-Nt8"]: with open("Examples/"+name+"/simulation1/Full_model_properties.p", 'rb') as fp: load_mnist = pickle.load(fp) psbc_testing = Binary_Phase_Separation() prediction = \ psbc_testing.predict( combined_handwritten_for_psbc, load_mnist["best_par_U_model"], load_mnist["best_par_P_model"],\ subordinate = load_mnist["best_par_U_model"]["subordinate"] ) print("Model", name, " predicts", np.squeeze(prediction), "and correct is, [0 0 0 1 1 1]" )<jupyter_output>Model W1S-NS predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model W1S-S predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-NS predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-S predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model W1S-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model W1S-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model W1S-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-Nt1 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model WNtS-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model Per_W1S-Nt2 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model Per_W1S-Nt4 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model Per_W1S-Nt8 predicts [0 0 0 0 1 1] and correct is, [0 0 0 1 1 1] Model Per_WNtS-Nt1 predicts [0 0 0 [...]
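A closing remark on the normalization step used above: inverting the `denormalize` formula quoted in the docstring (A = (1/sigma) * (Z - 0.5 + sigma/2), followed by min_vals + A * (max_vals - min_vals)) suggests that the forward transform is a min-max rescaling into the band [0.5 - sigma/2, 0.5 + sigma/2], i.e. [0.4, 0.6] for the default sigma = 0.2, consistent with the text above. The sketch below is that inferred transform, for illustration only; it is not the library's actual `Initialize_Data.normalize` implementation, and the per-column normalization is an assumption.

```
import numpy as np

# Inferred from the denormalize docstring above -- an illustrative sketch, not the
# library's actual Initialize_Data.normalize. Normalizing per column (one image per
# column here) is an assumption.
def normalize_to_band(Z, sigma=0.2):
    """Min-max rescale each column of Z into [0.5 - sigma/2, 0.5 + sigma/2]."""
    min_vals = Z.min(axis=0, keepdims=True)
    max_vals = Z.max(axis=0, keepdims=True)
    A = (Z - min_vals) / (max_vals - min_vals)      # plain [0, 1] min-max scaling
    return 0.5 - sigma / 2 + sigma * A, min_vals, max_vals

Z = np.random.randint(0, 256, size=(784, 6)).astype(float)   # stand-in for the six digits
Z_band, mn, mx = normalize_to_band(Z)
print(Z_band.min(), Z_band.max())    # 0.4 and 0.6
```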
License: no_license | Path: /PSBC_v1/Notebook_PSBC_examples.ipynb | Repo: rafael-a-monteiro-math/Binary_classification_phase_separation | 39
<jupyter_start><jupyter_text># Single Layer Neural Network 이번 시간에는 딥러닝 알고리즘의 가장 기본이 되는 인공신경망(artificial neural network, ANN), 그 중에서도 single-layer neural network 모델을 구현합니다. 오늘은 크게 크게 세 가지 방식, 1) Random Search, 2) h-step Search, 3) Gradient Descent 로 모델을 학습하는 법을 배우며, 이 중에 어떤 것이 가장 좋고 어떤 것을 선택해야하는지를 배웁니다. <jupyter_code>import numpy as np<jupyter_output><empty_output><jupyter_text>## Case 1 - 0.3 x X1 + 0.5 x X2### Load Dataset<jupyter_code>x1 = np.random.rand(100) print(x1.shape) x1[:10] x2 = np.random.rand(100) print(x2.shape) x2[:10] y = 0.3 * x1 + 0.5 * x2 + 0.1 print(y.shape) y[:10]<jupyter_output>(100,) <jupyter_text>### First idea: Random Search<jupyter_code>num_epoch = 10000 best_error = np.inf best_epoch = None best_w1 = None best_w2 = None best_b = None for epoch in range(num_epoch): w1 = np.random.uniform(low=-1.0, high=1.0) w2 = np.random.uniform(low=-1.0, high=1.0) b = np.random.uniform(low=-1.0, high=1.0) y_predict = x1 * w1 + x2 * w2 + b error = np.abs(y_predict - y).mean() if error < best_error: best_error = error best_epoch = epoch best_w1 = w1 best_w2 = w2 best_b = b print("{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(epoch, w1, w2, b, error)) print("----" * 15) print("{0:4} w1 = {1:.5f}, w2 = {2:.5f}, b = {3:.5f}, error = {4:.5f}".format(best_epoch, best_w1, best_w2, best_b, best_error))<jupyter_output> 0 w1 = 0.16720, w2 = 0.10272, b = -0.72859, error = 1.09869 1 w1 = 0.82804, w2 = 0.53150, b = 0.84809, error = 1.03204 2 w1 = 0.80962, w2 = 0.07287, b = -0.77298, error = 0.83242 4 w1 = -0.18429, w2 = 0.12042, b = 0.37839, error = 0.19042 7 w1 = 0.05444, w2 = 0.00767, b = 0.40372, error = 0.14693 21 w1 = -0.14689, w2 = 0.21581, b = 0.45108, error = 0.13260 60 w1 = 0.03061, w2 = 0.57022, b = 0.07129, error = 0.13072 96 w1 = -0.06154, w2 = 0.79950, b = 0.06346, error = 0.12450 216 w1 = 0.65567, w2 = 0.26152, b = 0.12255, error = 0.12167 264 w1 = 0.12405, w2 = 0.74461, b = 0.15717, error = 0.10481 409 w1 = 0.58461, w2 = 0.44052, b = -0.07694, error = 0.08539 785 w1 = 0.55593, w2 = 0.31329, b = 0.09222, error = 0.07776 806 w1 = 0.28493, w2 = 0.56734, b = 0.13940, error = 0.06612 1001 w1 = 0.20557, w2 = 0.39686, b = 0.16333, error = 0.04413 1464 w1 = 0.43189, w2 = 0.49839, b = 0.01952, error = 0.03420 5299 w1 = 0.28513, w2 = 0.58166, b = 0.06983, error = 0.02158 85[...]<jupyter_text>### Case 2 - h-step Search<jupyter_code>num_epoch = 15000 w1 = np.random.uniform(low=-1.0, high=1.0) w2 = np.random.uniform(low=-1.0, high=1.0) b = np.random.uniform(low=-1.0, high=1.0) h = 0.01 for epoch in range(num_epoch): y_predict = x1 * w1 + x2 * w2 + b current_error = np.abs(y_predict - y).mean() if current_error < 0.005: break y_predict = x1 * (w1 + h) + x2 * w2 + b h_plus_error = np.abs(y_predict - y).mean() if h_plus_error < current_error: w1 = w1 + h else: y_predict = x1 * (w1 - h) + x2 * w2 + b h_minus_error = np.abs(y_predict - y).mean() if h_minus_error < current_error: w1 = w1 - h y_predict = x1 * w1 + x2 * (w2 + h) + b h_plus_error = np.abs(y_predict - y).mean() if h_plus_error < current_error: w2 = w2 + h else: y_predict = x1 * w1 + x2 * (w2 - h) + b h_minus_error = np.abs(y_predict - y).mean() if h_minus_error < current_error: w2 = w2 - h y_predict = x1 * w1 + x2 * w2 + (b + h) h_plus_error = np.abs(y_predict - y).mean() if h_plus_error < current_error: b = b + h else: y_predict = x1 * w1 + x2 * w2 + (b - h) h_minus_error = np.abs(y_predict - y).mean() if h_minus_error < current_error: b = b - h print("{0} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = 
{4:.5f}".format(epoch, w1, w2, b, current_error))<jupyter_output>109 w1 = 0.29811, w2 = 0.51574 b = 0.09606 error = 0.00472 <jupyter_text>### Third Idea - Gradient Descent<jupyter_code>num_epoch = 100 learning_rate = 1.1 w1 = np.random.uniform(low=-1.0, high=1.0) w2 = np.random.uniform(low=-1.0, high=1.0) b = np.random.uniform(low=-1.0, high=1.0) for epoch in range(num_epoch): y_predict = x1 * w1 + x2 * w2 + b error = np.abs(y_predict - y).mean() if error < 0.005: break w1 = w1 - learning_rate * ((y_predict - y) * x1).mean() w2 = w2 - learning_rate * ((y_predict - y) * x2).mean() b = b - learning_rate * (y_predict - y).mean() if epoch % 10 == 0: print("{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}".format(epoch, w1, w2, b, error)) print("----" * 15) print("{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}".format(epoch, w1, w2, b, error)) num_epoch = 100 learning_rate = 1.1 w1 = np.random.uniform(low=-1.0, high=1.0) w2 = np.random.uniform(low=-1.0, high=1.0) b = np.random.uniform(low=-1.0, high=1.0) for epoch in range(num_epoch): y_predict = x1 * w1 + x2 * w2 + b error = np.abs(y_predict - y).mean() if error < 0.005: break w1 = w1 - learning_rate * ((y_predict - y) * x1).mean() w2 = w2 - learning_rate * ((y_predict - y) * x2).mean() b = b - learning_rate * (y_predict - y).mean() if epoch % 10 == 0: print("{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}".format(epoch, w1, w2, b, error)) print("----" * 15) print("{0:2} w1 = {1:.5f}, w2 = {2:.5f} b = {3:.5f} error = {4:.5f}".format(epoch, w1, w2, b, error))<jupyter_output> 0 w1 = -0.28433, w2 = 0.17431 b = 1.00120 error = 0.54307 10 w1 = -0.06276, w2 = 0.24165 b = 0.44684 error = 0.12318 20 w1 = 0.11247, w2 = 0.35496 b = 0.27956 error = 0.06483 30 w1 = 0.20332, w2 = 0.42073 b = 0.19483 error = 0.03415 40 w1 = 0.24989, w2 = 0.45708 b = 0.15014 error = 0.01801 50 w1 = 0.27391, w2 = 0.47691 b = 0.12651 error = 0.00950 60 w1 = 0.28636, w2 = 0.48763 b = 0.11401 error = 0.00502 ------------------------------------------------------------ 61 w1 = 0.28636, w2 = 0.48763 b = 0.11401 error = 0.00471
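One point worth making explicit about the gradient-descent cell above: the printed `error` is the mean absolute error, but the update rule `w1 = w1 - learning_rate * ((y_predict - y) * x1).mean()` (and likewise for `w2` and `b`) is the exact gradient of one half of the mean squared error, L = 0.5 * mean((y_predict - y)**2), since the partial derivative of L with respect to w1 is mean((y_predict - y) * x1). Below is a quick numerical sanity check of that identity, added for illustration; it is not part of the original notebook.

```
import numpy as np

np.random.seed(0)
x1, x2 = np.random.rand(100), np.random.rand(100)
y = 0.3 * x1 + 0.5 * x2 + 0.1
w1, w2, b = 0.2, -0.4, 0.7          # arbitrary point at which to evaluate the gradient

def half_mse(w1, w2, b):
    y_predict = x1 * w1 + x2 * w2 + b
    return 0.5 * ((y_predict - y) ** 2).mean()

# analytic gradient, as used by the update rule above
y_predict = x1 * w1 + x2 * w2 + b
grad_w1 = ((y_predict - y) * x1).mean()

# central finite-difference approximation of dL/dw1
h = 1e-6
grad_w1_fd = (half_mse(w1 + h, w2, b) - half_mse(w1 - h, w2, b)) / (2 * h)

print(grad_w1, grad_w1_fd)   # the two numbers agree to roughly 1e-9
```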
License: no_license | Path: /01_01-single-layer-neural-network-for-regression.ipynb | Repo: ikarus-999/DeepLearning01 | 5
<jupyter_start><jupyter_text># Short URL on this Colab: https://bit.ly/aieat-pycaret2# PyCaret 3.0 - Week 2 ## พัฒนา Model AI แบบ Low code ด้วย PyCaret 1. PyCaret Classification 2. PyCaret Time Series 3. PyCaret Clustering# ติดตั้ง PyCaret ติดตั้ง Pycaret ผ่าน pip (Package Installer for Python) ด้วยวิธีนี้<jupyter_code>!pip install --pre catboost pycaret<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting catboost Downloading catboost-1.2-cp310-cp310-manylinux2014_x86_64.whl (98.6 MB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 98.6/98.6 MB 2.3 MB/s eta 0:00:00 [?25hCollecting pycaret Downloading pycaret-3.0.2-py3-none-any.whl (483 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 483.6/483.6 kB 41.2 MB/s eta 0:00:00 [?25hRequirement already satisfied: graphviz in /usr/local/lib/python3.10/dist-packages (from catboost) (0.20.1) Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from catboost) (3.7.1) Requirement already satisfied: numpy>=1.16.0 in /usr/local/lib/python3.10/dist-packages (from catboost) (1.22.4) Requirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.10/dist-packages (from catboost) (1.5.3) Requirement already satisfied: scipy in [...]<jupyter_text>**เมื่อลงเสร็จแล้ว ต้องกดปุ่ม Restart Runtime เพื่อให้มัน Update Matplotlib เป็น Version ล่าสุด** และรัน Cell ข้างล่างเพื่อรัน Libary ที่ต้องใช้<jupyter_code># Importing necessary libraries import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns from pycaret.datasets import get_data # ปรับ dpi ของกราฟให้ละเอียดมากพอ ไม่ให้รูปใหญ่เกิน Notebook Colab mpl.rcParams['figure.dpi'] = 300<jupyter_output><empty_output><jupyter_text># Time-Series ทำนายข้อมูลในอนาคตด้วยการทำนายราคา Bitcoin กันโดยดึงข้อมูลในรูปแบบ CSV มาจากเว็บ https://www.blockchain.com/charts/market-price ## Time-Series vs Regression? * Time-series **ไม่สามารถใช้วิธีการแบ่งข้อมูลแบบเดียวกับงาน Regression** เพราะ Regression/Classification ใช้วิธีแบ่งข้อมูล Stratified random sampling ซึ่งเนื่องจากข้อมูลในอนาคตที่เราต้องการทำนายจะ Leak เราต้องแบ่งเป็น Rolling / Sliding Windows บน Dataset เพื่อให้ Fair ในการวัดผล * **Model Algorithm?** Regression กับ Time-Series Data มีความใกล้เคียงกัน แต่อยู่บนสันนินาฐที่ต่างกัน อาทิเช่นเรื่องความเป็น Seasonal ที่ Time-Series Data Prediction เอามาใช้ประโยชน์ได้ และมี Model ที่สร้างมาเพื่องานนี้แตกต่างกัน<jupyter_code>! wget -O bitcoin.csv https://raw.githubusercontent.com/iapp-technology/training_datasets/main/market-price-all.csv<jupyter_output>--2022-08-14 07:49:46-- https://raw.githubusercontent.com/iapp-technology/training_datasets/main/market-price-all.csv Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 45473 (44K) [text/plain] Saving to: ‘bitcoin.csv’ bitcoin.csv 0%[ ] 0 --.-KB/s bitcoin.csv 100%[===================>] 44.41K --.-KB/s in 0.001s 2022-08-14 07:49:46 (57.5 MB/s) - ‘bitcoin.csv’ saved [45473/45473] <jupyter_text>เปิด Tab File ด้านขวา จะเจอไฟล์ชื่อ bitcoin.csv ทำการ Double Click เพื่อดูเนื้อหาได้เลย<jupyter_code>import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Load the dataset df = pd.read_excel("https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx") # View the first few rows of the dataset print(df.head()) # Get summary statistics of the numerical columns print(df.describe()) # Check the data types of the columns print(df.info()) # Check for missing values print(df.isnull().sum()) # Perform data visualization # Histogram of a numerical column plt.figure(figsize=(10, 6)) sns.histplot(df['area'], bins=30) plt.xlabel('Area') plt.ylabel('Count') plt.title('Distribution of Area') plt.show() # Scatter plot of two numerical columns plt.figure(figsize=(10, 6)) sns.scatterplot(x='cane_type', y='area', data=df) plt.xlabel('Cane Type') plt.ylabel('Area') plt.title('Area vs. Cane Type') plt.show() # Box plot of a numerical column by a categorical column plt.figure(figsize=(10, 6)) sns.boxplot(x='cane_type', y='area', data=df) plt.xlabel('Cane Type') plt.ylabel('Area') plt.title('Area by Cane Type') plt.show() # Correlation heatmap of numerical columns plt.figure(figsize=(10, 8)) sns.heatmap(df.corr(), annot=True, cmap='coolwarm') plt.title('Correlation Heatmap') plt.show() df.info() import pandas as pd import numpy as np # Load the dataset df = pd.read_excel("https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx") # Check for missing values print("Missing Values:") print(df.isnull().sum()) # Drop rows with missing values df = df.dropna() # Handling outliers def handle_outliers(data, column): q1 = np.percentile(data[column], 25) q3 = np.percentile(data[column], 75) iqr = q3 - q1 lower_bound = q1 - (1.5 * iqr) upper_bound = q3 + (1.5 * iqr) data = data[(data[column] >= lower_bound) & (data[column] <= upper_bound)] return data # Apply outlier handling to the 'area' column df = handle_outliers(df, 'area') # Reset the index after cleaning df = df.reset_index(drop=True) # Verify the cleaned dataset print("Cleaned Dataset:") print(df.head()) # Drop the unwanted columns columns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT'] df = df.drop(columns_to_drop, axis=1) # dataframe จัดกลุ่มรวมยอด ตามวันที่ df = df.groupby('date').sum() df.info df df.plot() df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'> DatetimeIndex: 544 entries, 2018-10-03 to 2023-01-31 Data columns (total 1 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 area 544 non-null float64 dtypes: float64(1) memory usage: 8.5 KB <jupyter_text>## Time-Series Prediction Time-Series Prediction ของ PyCaret ไม่ได้ใช้บนพื้นฐานของ Scikit Learn ตามปกติที่เราทำกัน ตัวนี้สร้างบนพื้นฐานของ Facebook Phophet Engine https://facebook.github.io/prophet/ 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJoAAAA0CAYAAABy6SGJAAABQ2lDQ1BJQ0MgUHJvZmlsZQAAKJFjYGASSSwoyGFhYGDIzSspCnJ3UoiIjFJgf8bAwcDHwMnAxWCZmFxc4BgQ4ANUwgCjUcG3awyMIPqyLsgsXtbzp+VX7HYMCuT7sftN3AJM9SiAKyW1OBlI/wHi1OSCohIGBsYUIFu5vKQAxO4AskWKgI4CsueA2OkQ9gYQOwnCPgJWExLkDGTfALIFkjMSgWYwvgCydZKQxNOR2FB7QYDbx13BLdTHR8HDhYBryQAlqRUlINo5v6CyKDM9o0TBERhKqQqeecl6OgpGBkZGDAygMIeo/nwDHJaMYhwIsUKgH608GRiYchFiCQEMDDs+gLyKEFPVYWDgOc7AcCC2ILEoEe4Axm8sxWnGRhA293YGBtZp//9/DmdgYNdkYPh7/f//39v///+7jIGB+RZQ7zcAm+9e/8SXSUYAAABWZVhJZk1NACoAAAAIAAGHaQAEAAAAAQAAABoAAAAAAAOShgAHAAAAEgAAAESgAgAEAAAAAQAAAJqgAwAEAAAAAQAAADQAAAAAQVNDSUkAAABTY3JlZW5zaG90qfldpQAAAdVpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NTI8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+MTU0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CuB5yzQAAATtSURBVHgB7Vo7axVBFJ4EQyJGRBEVQS2MoCSFJCoYCxECFlYqmh8gUX+AhZWFrYJt0MZOYpE/oIhNFNGIoqIRIUhMEw2KIj6C6H4XzmV277x27uwyzj0DN7s7c+Y8vvPNmdklXfuOX/kruDECFSPQXbF+Vs8INBBgojERakGAiVYLzGyEicYcqAUBJlotMLMRJhpzoBYEmGi1wMxGmGjMgVoQYKLVAjMbYaIxB2pBgIlWC8xsZBVDkEfg8IEBcebUQdHd3SVuTj8Sd2fm8gL85IUAE60A29o1vWLPzs2N3nX9fYVRfvRFgIlWQG725YKYvDUjVvf1iCcvFgqj/OiLQBf/m5AvdDyvDAIdXdGOHRkUA9s3ik+fv4s72VlsaflbGexYtgQCHUu0k0f3iovnxppQbdrQL67dvN985puwCHTs543BXVtySI4d2p175oewCHQs0b58/ZFD8t37j7lnfgiLQLJEGxnaJvDTtXsP34rnbxYbw89eLza+melkub99BJzfOh9PX9BawyeBp68WxPWpB1oZGpi8PG4kQBldpJOuZ8dHxfBgK8Gg8/ylKRJrXvt6e8SOrevF75U/Yv7DcrMfN1XEeyPDR4UR/J7Ifmg6XxuD2R9Zlvp0V509LEDkoUzbf+JqGfEW2SAVDY4DKCTHVEVarCs6fHQRcPBBZf/GbfUC+PlrRczNL7WQTOFWrsvHx5yCDnwI/tY5cXo0W5Wt1cMHW6w6l5WkWp2oDGiotHTv44NtTsh4bbZU41XGprLn2+dFNGxDcoByOcdqx08e1zmnKu2yLsyz6YK83FQ65XGf+1Dx+tg2zQHGqiOBaQ7GMK+4gOUjTRUYBtk6ce6QiYVV7tuKukayM5euFUnpChDmgcC+rXjOMvnoayO1eV4VTQUCtihKHu7baaSnrI4iAVTzm+QsLA6VrKkPC6usn6oXFZMNlzGTD/Lid9FVpUwQoiFYemtq19niVmjSh8RRQzWzNXl7sMmaxn0rInAaGSr3tmfyw6QPJAt1Vjb54DrmRTRsFbRdqFapS2WBgyCnTBbV6nTV5Rowycl2qU93pXhpTtHPqnzU+fM/9nsRzVS9XCqLDFQxaTSGFan7LEEy8pVIIPeFug8Vr24r02Hg4r9OZ7vHFxfbZWS8iKYyQMTQBa6ao+uDDpe3qTLnQiRTTmgZEqv89IkX/qqqX/PcqDJk6HPFyaCitiEvoqFqzUoHfl9yyW+JMthECpte+DCRQQU5VQJlFNt5Ew4Vr+xPp917EQ0JtpGgLJAgCrY/qjouH0LhA35ydaL5sI8xEFjWi/4GcbIx11ZFvK62bXJyvCrZ0HlS2XDp8yKai2IfGRCG3soAIEhiq1TyFitXRZ19AG/TqZsbWz8wIrxUviHWWN48g3ywVQXp0wdgUG2o4RBuW7EkCznToR1y0C0Tk+bytXoEnCtaqBJsexuiakNvkfi04GIbMiARncWIoOjXHcJN8LrYNM2nMVu8kMPWPJz5iWaTl2UbEwx/bLrkqWVk5Xmu987/JuSqkOUYARUCUW2dKge5Lw0EmGhp5DH6KJho0acoDQeZaGnkMfoomGjRpygNB5loaeQx+iiYaNGnKA0HmWhp5DH6KJho0acoDQeZaGnkMfoomGjRpygNB5loaeQx+iiYaNGnKA0HmWhp5DH6KJho0acoDQf/AVjS2x34TqZxAAAAAElFTkSuQmCC) เป็น Library การคาดการณ์ข้อมูลอนุกรมเวลาตามแบบจำลองการเติมซึ่งแนวโน้มที่ไม่เป็นเชิงเส้นเหมาะสมกับฤดูกาลประจำปี รายสัปดาห์ และรายวัน บวกกับเอฟเฟกต์วันหยุด ทำงานได้ดีที่สุดกับอนุกรมเวลาที่มีผลตามฤดูกาลที่แข็งแกร่งและข้อมูลทางประวัติศาสตร์หลายฤดูกาล Phophet มีความแข็งแกร่งต่อข้อมูลที่ขาดหายไปและการเปลี่ยนแปลงในแนวโน้ม และโดยทั่วไปแล้วจะจัดการกับค่าผิดปกติได้ดี ``` from pycaret.time_series import * exp_name = setup(data = bitcoin_df, target="market-price", fh = 12) ```ค่า Parameter ในการ Setup มีดังนี้ 1. 
*data = dataframe ที่เราต้่องการที่จะนำไป Train (จำเป็นต้องใส่) 1. *target = Column เป้าหมาย (y) ที่เราต้่องการให้ Model เรียนรู้และสามารถ Predict ค่า target ได้อย่างแม่นยำ (จำเป็นต้องใส่) 1. session_id = เลขประจำ session เป็นเลขอะไรก็ได้ เราสามารถใช้อ้างอิงได้ภายหลัง 1. Transform_target = หากข้อมูลเป้าหมาย (y) ไม่เป็น Normal Distribution อาทิเช่นเอนเอียงไปด้านนึง เราควรปรับค่านี้ให้เป็น True ซึ่งระบบจะทำการแปลงข้่อมูลเป้าหมาย (y) ให้เป็น Normal Distribution ให้เรา 1. fh = จำนวนข้อมูลที่เราต้องการที่จะให้ Model ของเรา Forecast 1. seasonal_period: ปรับค่า Seasonal_Period ที่โมเดลใช้เป็นสมมุติฐาน * B, C = 5 * D = 7 * W = 52 * M, BM, CBM, MS, BMS, CBMS = 12 * SM, SMS = 24 * Q, BQ, QS, BQS = 4 * A, Y, BA, BY, AS, YS, BAS, BYS = 1 * H = 24 * T, min = 60 * S = 60 1. fold_strategy: การแบ่งข้อมูลในการแยก Training set / Validation set * 'expanding' * 'rolling' (same as/aliased to 'expanding') * 'sliding' 1. fold: int, default = 3 จำนวน Fold ที่ใช้ในการ Cross Validation ### Expanding/Rolling Windows ![](https://i.stack.imgur.com/Zs2Xp.png)### Sliding Windows ![](https://i.stack.imgur.com/Xd62a.png)<jupyter_code>import pandas as pd from pycaret.time_series import * # Load the dataset df = pd.read_excel("https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx") # Drop the unwanted columns columns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT'] df = df.drop(columns_to_drop, axis=1) df = df.groupby('date').sum() # Drop duplicates from the "date" column df = df.drop_duplicates(subset='date') # Set the frequency of the index to daily ('D') df['date'] = pd.to_datetime(df['date']) df = df.set_index('date').asfreq('D') # Interpolate missing values df = df.interpolate() # Perform time series setup exp_name = setup(data=df, target='area', fh=12, fold=10, fold_strategy='sliding') import pandas as pd from pycaret.time_series import * # Load the dataset df = pd.read_excel("https://github.com/Benjamnk/DSE-2/raw/main/cane_area_MPV_TSR_date.xlsx") # Drop the unwanted columns columns_to_drop = ['year', 'cane_type', 'gis_idkey', 'QT'] df = df.drop(columns_to_drop, axis=1) df = df.groupby('date').sum() # Check for missing values print("Missing Values:") print(df.isnull().sum()) # Drop rows with missing values df = df.dropna() # Handling outliers def handle_outliers(data, column): q1 = np.percentile(data[column], 25) q3 = np.percentile(data[column], 75) iqr = q3 - q1 lower_bound = q1 - (1.5 * iqr) upper_bound = q3 + (1.5 * iqr) data = data[(data[column] >= lower_bound) & (data[column] <= upper_bound)] return data # Apply outlier handling to the 'area' column df = handle_outliers(df, 'area') # Reset the index after cleaning df = df.reset_index(drop=True) # Verify the cleaned dataset print("Cleaned Dataset:") print(df.head()) # Perform time series setup exp_name = setup(data=df, target='area', fh=12, fold=10, fold_strategy='sliding') df.head()<jupyter_output><empty_output><jupyter_text>## Explore Data เราสามารถใช้ Plot เพื่อดูข้อมูลได้เลยง่ายๆ <jupyter_code>df.plot()<jupyter_output><empty_output><jupyter_text>## Compare Models เราสามารถใช้่คำสั่ง ``compare_models()`` เพื่อหา Model ที่ดีที่สุดในการทำ Forecasting * Model = ชื่อ Model * MAE = Mean Absolute Error (https://en.wikipedia.org/wiki/Mean_absolute_error) ยิ่งน้อยยิ่งดี * MSE = Mean Square Error (https://en.wikipedia.org/wiki/Mean_squared_error) ยิ่งน้อยยิ่งดี * RMSE = Root Mean Square Error (https://en.wikipedia.org/wiki/Root-mean-square_deviation) ยิ่งน้อยยิ่งดี * R2 = Coefficient of determination 
(https://en.wikipedia.org/wiki/Coefficient_of_determination) ยิ่งเข้าใกล้ 1 ยิ่งดี * RMSLE - Root Mean Squared Logaritmic Error (RMSLE) (https://hrngok.github.io/posts/metrics/#:~:text=Root%20Mean%20Squared%20Logaritmic%20Error%20(RMSLE)&text=It%20is%20the%20Root%20Mean,possible%200%20(zero)%20values.) ยิ่งน้อยยิ่งดี * MAPE - Mean absolute percentage error (https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) ยิ่งน้อยยิ้งดี * TT - Time Taken (Sec) เวลาในการใช้<jupyter_code>best = compare_models(fold=1, round=1)<jupyter_output><empty_output><jupyter_text>## Create Model เราสามารถใช้่คำสั่ง create_model("{ชื่อ_Model}") เพื่อสร้าง model ตามที่เราต้องการ * 'naive' - Naive Forecaster * 'grand_means' - Grand Means Forecaster * 'snaive' - Seasonal Naive Forecaster (disabled when seasonal_period = 1) * 'polytrend' - Polynomial Trend Forecaster * 'arima' - ARIMA family of models (ARIMA, SARIMA, SARIMAX) * 'auto_arima' - Auto ARIMA * 'exp_smooth' - Exponential Smoothing * 'croston' - Croston Forecaster * 'ets' - ETS * 'theta' - Theta Forecaster * 'tbats' - TBATS * 'bats' - BATS * 'prophet' - Prophet Forecaster * 'lr_cds_dt' - Linear w/ Cond. Deseasonalize & Detrending * 'en_cds_dt' - Elastic Net w/ Cond. Deseasonalize & Detrending * 'ridge_cds_dt' - Ridge w/ Cond. Deseasonalize & Detrending * 'lasso_cds_dt' - Lasso w/ Cond. Deseasonalize & Detrending * 'lar_cds_dt' - Least Angular Regressor w/ Cond. Deseasonalize & Detrending * 'llar_cds_dt' - Lasso Least Angular Regressor w/ Cond. Deseasonalize & Detrending * 'br_cds_dt' - Bayesian Ridge w/ Cond. Deseasonalize & Deseasonalize & Detrending * 'huber_cds_dt' - Huber w/ Cond. Deseasonalize & Detrending * 'par_cds_dt' - Passive Aggressive w/ Cond. Deseasonalize & Detrending * 'omp_cds_dt' - Orthogonal Matching Pursuit w/ Cond. Deseasonalize & Detrending * 'knn_cds_dt' - K Neighbors w/ Cond. Deseasonalize & Detrending * 'dt_cds_dt' - Decision Tree w/ Cond. Deseasonalize & Detrending * 'rf_cds_dt' - Random Forest w/ Cond. Deseasonalize & Detrending * 'et_cds_dt' - Extra Trees w/ Cond. Deseasonalize & Detrending * 'gbr_cds_dt' - Gradient Boosting w/ Cond. Deseasonalize & Detrending * 'ada_cds_dt' - AdaBoost w/ Cond. Deseasonalize & Detrending * 'lightgbm_cds_dt' - Light Gradient Boosting w/ Cond. Deseasonalize & Detrending * 'catboost_cds_dt' - CatBoost w/ Cond. Deseasonalize & Detrending <jupyter_code>ada_cds_dt_model = create_model("ada_cds_dt")<jupyter_output><empty_output><jupyter_text>## Prediction<jupyter_code>predictions = predict_model(ada_cds_dt_model) predictions df[-30:] predictions.insert(1, "area", df[-30:], True) predictions predictions.plot()<jupyter_output><empty_output><jupyter_text>## Plot Model ``plot_model({model}, plot={plot_name})`` default is changed to 'forecast'. 
List of available plots (ID - Name): * 'ts' - Time Series Plot * 'train_test_split' - Train Test Split * 'cv' - Cross Validation * 'acf' - Auto Correlation (ACF) * 'pacf' - Partial Auto Correlation (PACF) * 'decomp' - Classical Decomposition * 'decomp_stl' - STL Decomposition * 'diagnostics' - Diagnostics Plot * 'diff' - Difference Plot * 'periodogram' - Frequency Components (Periodogram) * 'fft' - Frequency Components (FFT) * 'ccf' - Cross Correlation (CCF) * 'forecast' - "Out-of-Sample" Forecast Plot * 'insample' - "In-Sample" Forecast Plot * 'residuals' - Residuals Plot<jupyter_code>plot_model(ada_cds_dt_model, "ts") plot_model(ada_cds_dt_model, "forecast") plot_model(arima_model, "train_test_split") tuned_arima_model = tune_model(arima_model)<jupyter_output><empty_output><jupyter_text>### Finalize, Save Model and Load Model เมื่อเราได้ Model ที่พร้อมแล้ว เราสามารถทำ ``finalize_model({Model})`` **เพื่อทำการ Train Model บน Dataset ทั้่งหมด (ALL) ใหม่** เพื่อให้โมเดลได้เรียนรู้บน Data ให้ได้มากที่สุดบนรูปแบบ Estimator (Model) และ Hyperparamters ที่เราได้เลือกและได้กำหนดค่าไว้ในตัวแปร {Model} **Save model** ``save_model({Final_Model}, "ชื่อไฟล์")`` และ **Load Model** ``{Model} = load_model("ชื่อไฟล์")`` เหมือนตัวอื่นๆ<jupyter_code>final_best = finalize_model(best) save_model(final_best, "snaive_bitcoin_best") loaded_snaive_bitcoin = load_model("snaive_bitcoin_best") loaded_snaive_bitcoin<jupyter_output><empty_output><jupyter_text>## Exercise เรามาลอง Predict ข้อมูลที่มี Pattern ชัดเจนมากกว่า Bitcoin กันเถอะ ข้อมูลคุณภาพอากาศ ![](https://www.researchgate.net/profile/Irena-Markovska-2/publication/340899809/figure/fig2/AS:883882824065027@1587745495992/Dependences-between-criterion-COGT-and-criteria-PT08S1CO-C6H6GT-PT08S2NMHC.ppm) ปริมาณสารพิษในอากาศที่จับได้ ประกอบไปด้วย * True hourly averaged concentration CO in mg/m3 - CO(GT) * PT08.S1 (tin oxide) hourly averaged sensor response - PT08.S1(CO) * True hourly averaged Benzene concentration in microg/m3 - C6H6(GT) * PT08.S2 (titania) hourly averaged sensor response - PT08.S2(NMHC) * True hourly averaged NOx concentration in ppb - NOx(GT) * PT08.S3 (tungsten oxide) hourly averaged sensor response - PT08.S3(NOx)* True hourly averaged NO2 concentration in microg/m3 - NO2(GT);8) * PT08.S4 (tungsten oxide) hourly averaged sensor response - PT08.S4(NO2) * PT08.S5 (indium oxide) hourly averaged sensor response - PT08.S5(O3); * Temperature in °C - T * Relative Humidity (%) - RH * Absolute Humidity - AH (PDF) Application of the InterCriteria Analysis Over Air Quality Data. Available from: https://www.researchgate.net/publication/318029588_Application_of_the_InterCriteria_Analysis_Over_Air_Quality_Data [accessed Aug 14 2022]. 
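Before starting the exercise below, note that the finalize/save/load cycle shown earlier only stores the forecaster; to actually produce future values from a reloaded model you still call `predict_model`. The sketch below assumes the PyCaret 3 time-series functional API, where `predict_model` accepts a forecast horizon `fh`; it is not part of the original notebook.

```
from pycaret.time_series import load_model, predict_model

# Reload the finalized forecaster saved above and request a 12-step-ahead forecast.
# The fh argument is assumed to be supported by predict_model in pycaret.time_series 3.x.
loaded = load_model("snaive_bitcoin_best")
future = predict_model(loaded, fh=12)
print(future.head())
```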
<jupyter_code>from pycaret.datasets import get_data airquality = get_data('airquality') airquality.info() # รวม Date+Time เป็น datetime airquality["datetime"] = pd.to_datetime(airquality.Date.astype(str) + ' ' + airquality.Time.astype(str)) airquality = airquality.drop(["Date","Time"], axis=1) airquality # dataframe จัดกลุ่มรวมยอด ตามวันที่ airquality=airquality.groupby('datetime',as_index=True).sum() #แปลง index ที่เป็นอยู่ date ให้เป็น date_time airquality.index=pd.to_datetime(airquality.index) airquality from pycaret.time_series import * exp_name = setup(data = airquality, target="CO(GT)", fh = 12) # Start your work here....<jupyter_output><empty_output><jupyter_text># Clusteringคือการจัดกลุ่มข้อมูลโดยอัตโนมัติ เป็นรูปแบบ Unsupervised Learning โดยไม่ต้องมีค่า y (target class) มักจะใช้ในงานจัดกลุ่มลูกค้า (Customers Segmentation) หรือจัดกลุ่มเนื้อหาเอกสารหรือบทความต่างๆ (Document Grouping) เป็นต้น<jupyter_code>import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns from pycaret.clustering import * from sklearn.datasets import make_blobs mpl.rcParams['figure.dpi'] = 100<jupyter_output><empty_output><jupyter_text>## Synthetic Dataset รอบนี้เราลองสร้าง Dataset ปลอมๆ ขึ้นมาเอง เพื่อให้รู้ว่าการสร้าง Dataset ไม่จำเป็นต้องมาจากที่มีอยู่แล้วก็ได้ <jupyter_code># Generating dataset cols = ['column1', 'column2', 'column3', 'column4', 'column5'] arr = make_blobs(n_samples = 1000, n_features = 5, random_state = 20, centers = 3, cluster_std = 1) data = pd.DataFrame(data = arr[0], columns = cols) data.head()<jupyter_output><empty_output><jupyter_text>ในรอบนี้ เราสร้างข้อมูลจำนวน 1000 แถว มี 5 Features (Columns) โดยมีกระจุกตัวกันอยู่ 3 กลุ่ม (Clusters) การใช้ชุดข้อมูลสังเคราะห์เพื่อทดสอบโมเดลการทำคลัสเตอร์ของเรามีประโยชน์หลายประการ ข้อได้เปรียบหลักคือเราทราบจำนวนคลัสเตอร์จริงแล้ว ดังนั้นเราจึงสามารถประเมินประสิทธิภาพของโมเดลได้อย่างง่ายดาย โดยทั่วไป ข้อมูลในโลกแห่งความเป็นจริงจะซับซ้อนกว่าเนื่องจากไม่มีคลัสเตอร์ที่แยกจากกันอย่างชัดเจนเสมอไป## Explore Data เราลองมาดู Data เรากันว่าหน้าตาเป็นอย่างไร### Histogram เนื่องจากมี 3 Clusters ที่กระจุกตัวแตกต่างกัน ทำให้เราเห็นข้อมูลในแต่ละ Column มีจุด Peak อยู่ 2-3 จุด หรือเรียกว่า (bimodal หรือ multimodal) <jupyter_code># Plotting histogram data.hist(bins = 30, figsize = (10,7), grid = False) plt.show()<jupyter_output><empty_output><jupyter_text>### Color-encoded Matrix ดูความสัมพันธ์ระหว่าง Feature X ด้วย ``corr()`` และสร้างเป็น Heatmap มาให้ดู * 1 ไปด้วยกันเสมอ * -1 สวนทางกันเสมอ<jupyter_code># Plotting color-encoded matrix plt.figure(figsize=(8, 6)) sns.heatmap(data.corr().round(decimals=2), annot=True) plt.show()<jupyter_output><empty_output><jupyter_text>สังเกตุได้ว่า Col2, Col3 จะมี Correlation ใกล้เคียงกันสูง (แต่อย่างไรก็ตามก็ขึ้นกับการ Random ณ ขณะนั้่นด้่วย)### การดู Scatter plot เพื่อดู ความสัมพันธ์ระหว่าง Feature X<jupyter_code>sns.pairplot(data) plt.show()<jupyter_output><empty_output><jupyter_text>## Clustering Prediction การ Setup ของ Clustering จะมีความแตกต่างจากตัวอื่นๆ ดังนี้ 1. *data = dataframe ที่เราต้่องการที่จะนำไป Train (จำเป็นต้องใส่) 1. session_id = เลขประจำ session เป็นเลขอะไรก็ได้ เราสามารถใช้อ้างอิงได้ภายหลัง 1. pca: bool, default = False = เปิดการใช้ PCA เพื่อลดจำนวน Features ลงหรือไม่ 1. pca_method: str, default = 'linear' วิธีการทำ PCA, Possible values are: - 'linear': Uses Singular Value Decomposition. - 'kernel': Dimensionality reduction through the use of RBF kernel. - 'incremental': Similar to 'linear', but more efficient for large datasets. 1. 
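Before plotting, one practical note: `make_blobs` also returns the ground-truth cluster assignments, which the data-generation cell above discards by keeping only `arr[0]`. Keeping them around, as in the small sketch below (not part of the original notebook), is what makes the supervised scores discussed later in this section (Homogeneity, Rand Index, Completeness) computable on this synthetic data.

```
import pandas as pd
from sklearn.datasets import make_blobs

# Same synthetic data as above, but this time we keep the second return value:
# the true cluster label of every point.
X, true_labels = make_blobs(n_samples=1000, n_features=5, random_state=20,
                            centers=3, cluster_std=1)
print(pd.Series(true_labels).value_counts())   # three roughly equal clusters
```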
pca_components: int, float, str or None, default = None จำนวนของ pca ที่ต้องการเก็บไว้. This parameter is ignored when `pca=False`. - If None: All components are kept. - If int: Absolute number of components. - If "mle": Minka’s MLE is used to guess the dimension (ony for pca_method='linear'). 1. Transformation = หากข้อมูลไม่เป็น Normal Distribution อาทิเช่นเอนเอียงไปด้านนึง เราควรปรับค่านี้ให้เป็น True ซึ่งระบบจะทำการแปลงข้่อมูลให้เป็น Normal Distribution ให้เรา 1. Normalize = ทำการแปลงค่า numeric_features ทั้งหมดให้อยู่ระหว่าง -e..e โดยใช้หลักการของ z-score = (x - mean) / standard deviation <jupyter_code>from pycaret.clustering import * # PyCaret environment setup.Setting different parameters in setup() function # to prepare model training and deployment data. cluster = setup(data, session_id = 7652)<jupyter_output>INFO:logs:PyCaret ClusteringExperiment INFO:logs:Logging name: cluster-default-name INFO:logs:ML Usecase: MLUsecase.CLUSTERING INFO:logs:version 3.0.0.rc3 INFO:logs:Initializing setup() INFO:logs:self.USI: f6c1 INFO:logs:self.variable_keys: {'exp_id', '_gpu_n_jobs_param', 'display_container', 'memory', 'X', '_all_metrics', 'variable_keys', 'exp_name_log', 'n_jobs_param', 'USI', 'logging_param', '_available_plots', '_ml_usecase', 'log_plots_param', '_all_models_internal', 'master_model_container', '_all_models', 'idx', 'seed', 'data', 'pipeline', 'gpu_param', 'html_param'} INFO:logs:Checking environment INFO:logs:python_version: 3.7.13 INFO:logs:python_build: ('default', 'Apr 24 2022 01:04:09') INFO:logs:machine: x86_64 INFO:logs:platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic INFO:logs:Memory: svmem(total=13617745920, available=11611111424, percent=14.7, used=1977847808, free=9328275456, active=2347229184, inactive=1563693056, buffers=221483008, cached=2090139648, shared=13148[...]<jupyter_text>## Compare Model? 
Compare Model cannot be used here, because we have no ground-truth labels; instead we have to run create_model for each algorithm and judge the quality of the resulting groups with our own eyes.## Create Model Create a model with ``create_model({model name}, num_clusters={number of clusters})``. If ``num_clusters`` is not specified it defaults to 4. * 'kmeans' - K-Means Clustering * 'ap' - Affinity Propagation * 'meanshift' - Mean shift Clustering * 'sc' - Spectral Clustering * 'hclust' - Agglomerative Clustering * 'dbscan' - Density-Based Spatial Clustering * 'optics' - OPTICS Clustering * 'birch' - Birch Clustering * 'kmodes' - K-Modes Clustering<jupyter_code>kmeans_model = create_model("kmeans")<jupyter_output>INFO:logs:Initializing create_model() INFO:logs:create_model(self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, estimator=kmeans, num_clusters=4, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={}) INFO:logs:Checking exceptions <jupyter_text>### Clustering error metrics The error values in clustering do not come from a y label (ground truth), which this data does not have. Instead, once each point has been assigned to a cluster, we measure how good or bad the resulting clusters look: if all the points in a cluster sit tightly together that is good, while points that spread out and mix with other clusters are bad. The main metrics are: * Silhouette - ranges from -1 to 1 - higher is better; a value of 1 means every point in a cluster closely matches the other points in its cluster - https://en.wikipedia.org/wiki/Silhouette_(clustering) * Calinski-Harabasz - the distance ratio of points to other clusters over points to their own cluster - a higher score is better * Davies-Bouldin - the distance ratio of points to their own cluster over points to other clusters - a lower score is better * Homogeneity - ranges from 0 to 1 - higher is better - when a class y is available, it measures whether each cluster contains points of only one class; if so, the clustering is good and homogeneity = 1.0 * Rand Index - ranges from 0 to 1 - higher is better - when a class y is available, it measures how accurately each point was assigned to the correct cluster (similar to accuracy in classification) * Completeness - ranges from 0 to 1 - higher is better - when a class y is available, it is 1 when all points that share the same true label end up in the same cluster, i.e. it checks whether we collected them all (similar to recall in classification)## Plot Model ``plot_model({ModelVar}, '{PlotName}')`` * 'cluster' - Cluster PCA Plot (2d) * 'tsne' - Cluster t-SNE (3d) * 'elbow' - Elbow Plot * 'silhouette' - Silhouette Plot * 'distance' - Distance Plot * 'distribution' - Distribution Plot### Elbow Plot Used to find the best number of clusters for the dataset<jupyter_code># Plotting the model plot_model(kmeans_model, 'elbow')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=elbow, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=4, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>Now that we know the best number of clusters is 3, we retrain the model so that it is more accurate<jupyter_code>kmeans_model = create_model('kmeans', num_clusters = 
3)<jupyter_output>INFO:logs:Initializing create_model() INFO:logs:create_model(self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, estimator=kmeans, num_clusters=3, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={}) INFO:logs:Checking exceptions <jupyter_text>Notice that every error metric improves significantly.### PCA Plot Performs dimensionality reduction with Principal Component Analysis (PCA) from 5 dimensions -> 2 dimensions so that the clusters are easier to tell apart<jupyter_code># Plotting PCA plot plot_model(kmeans_model, 'cluster')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=cluster, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>### TSNE Plot<jupyter_code># 3D plot with t-SNE; it takes a long time to run plot_model(kmeans_model, 'tsne')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=tsne, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>### Silhouette Plot<jupyter_code>plot_model(kmeans_model, 'silhouette')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=silhouette, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>### Distance Plot<jupyter_code>plot_model(kmeans_model, 'distance')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=distance, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>### Distribution Plot<jupyter_code>plot_model(kmeans_model, 'distribution')<jupyter_output>INFO:logs:Initializing plot_model() INFO:logs:plot_model(plot=distribution, fold=None, use_train_data=False, verbose=True, display=None, display_format=None, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), feature_name=None, 
fit_kwargs=None, groups=None, label=False, plot_kwargs=None, save=False, scale=1, self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, system=True) INFO:logs:Checking exceptions <jupyter_text>## Evaluate Model<jupyter_code>evaluate_model(kmeans_model)<jupyter_output>INFO:logs:Initializing evaluate_model() INFO:logs:evaluate_model(self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7cb13890>, estimator=KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300, n_clusters=3, n_init=10, random_state=7652, tol=0.0001, verbose=0), fold=None, fit_kwargs=None, plot_kwargs=None, feature_name=None, groups=None, use_train_data=False) <jupyter_text>## Let's set up the experiment again with PCA enabled, so we can see how much it helps<jupyter_code>from pycaret.clustering import * # PyCaret environment setup. Setting different parameters in the setup() function # to prepare model training and deployment data. cluster = setup(data, session_id = 7653, pca=True, pca_method="linear") new_kmeans_model = create_model('kmeans', num_clusters = 3)<jupyter_output>INFO:logs:Initializing create_model() INFO:logs:create_model(self=<pycaret.clustering.oop.ClusteringExperiment object at 0x7efd7d154810>, estimator=kmeans, num_clusters=3, fraction=0.05, ground_truth=None, round=4, fit_kwargs=None, experiment_custom_tags=None, verbose=True, system=True, add_to_model_list=True, raise_num_clusters=False, display=None, kwargs={}) INFO:logs:Checking exceptions <jupyter_text>It did not help, which shows that this problem is not very complex: the data already separates nicely in 5 dimensions, so separating it in 2 dimensions makes no difference.## Assign Model This is like prediction, but because clustering has no y we cannot really call it "prediction", so the word "assign" is used instead; it returns the cluster assignment for the entire training dataset: ``assign_model({ModelVar})`` <jupyter_code>assignments = assign_model(kmeans_model) assignments<jupyter_output><empty_output><jupyter_text>## Finalize, Save Model and Load Model These work the same as in every other PyCaret module.## Clustering Exercise * Try clustering on a real dataset called ``jewellery``, a database of customers who bought jewellery from a shop. It contains: 1. age - customer age 1. income - income (USD) 1. SpendingScore - in-store spending score; the higher the score, the more the customer spends 1. Savings - total savings (USD) Try to find out how many groups of customers this shop has and what the distinguishing traits of each group are (one possible workflow is sketched after this notebook's code cell below).<jupyter_code>from pycaret.datasets import get_data jewellery = get_data('jewellery') jewellery.info() from pycaret.clustering import * #exp = setup(data = jewellery) << Start working here.<jupyter_output><empty_output>
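<jupyter_text>A minimal sketch of one possible workflow for the jewellery exercise, assuming that z-score normalization (normalize=True) is worthwhile because age, income and savings sit on very different scales; the session_id and the final num_clusters=5 below are placeholder assumptions to revisit after looking at the elbow plot, not a definitive solution.<jupyter_code>
from pycaret.datasets import get_data
from pycaret.clustering import setup, create_model, plot_model, assign_model

jewellery = get_data('jewellery')

# z-score normalization, since the features are on very different scales
exp = setup(data=jewellery, session_id=123, normalize=True)

# start with the default model and use the elbow plot to choose num_clusters
kmeans = create_model('kmeans')
plot_model(kmeans, 'elbow')

# retrain with the number of clusters read off the elbow plot (assumed 5 here)
kmeans = create_model('kmeans', num_clusters=5)
plot_model(kmeans, 'cluster')

# attach the cluster label to every row and profile each group by its feature means
assignments = assign_model(kmeans)
print(assignments.groupby('Cluster').mean())
<jupyter_output><empty_output>
Comparing the per-cluster means of income, SpendingScore and Savings is one simple way to describe what makes each customer group stand out.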
Source notebook: /time_serie_PyCaret_Ferti_CT.ipynb (repo: Benjamnk/DSE-2, no license)
<jupyter_start><jupyter_text>Here I am going to apply Principal Component Analysis to the given dataset using scikit-learn and find the dimensions (also known as components) with maximum variance, i.e. where the data is most spread out. The data is then projected onto a lower-dimensional space spanned by those components, discarding the directions with little variance, and the models are trained on the transformed dataset. I then apply a Random Forest Regressor to both the original and the transformed datasets and compare them. If you want to know the basic concept behind Principal Component Analysis, check this out. (https://www.kaggle.com/nirajvermafcb/d/ludobenistant/hr-analytics/principal-component-analysis-explained)<jupyter_code># This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) # Any results you write to the current directory are saved as output. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df=pd.read_csv('../input/data.csv') # Replace it with the path where your data file is stored df.head() df.describe() df.corr()<jupyter_output><empty_output><jupyter_text>Let us find out whether there is any relationship between temperature and apparent_temperature<jupyter_code>x=df['temperature'] y=df['apparent_temperature'] colors=('r','b') plt.xlabel('Temperature') plt.ylabel('Apparent_temperature') plt.scatter(x,y,c=colors[0]) # a single colour, since a 2-element colour tuple would not match the number of points<jupyter_output><empty_output><jupyter_text>The temperature given here is in Fahrenheit. We will convert it into Celsius using the formula **Celsius=(Fahrenheit-32)*(5/9)**<jupyter_code>Fahrenheit=df['temperature']<jupyter_output><empty_output><jupyter_text>Converting it into a list so we can apply a lambda function<jupyter_code>F=Fahrenheit.tolist()<jupyter_output><empty_output><jupyter_text>Applying the lambda function<jupyter_code>C= map(lambda x: (float(5)/9)*(x-32),F) Celsius=list(C)<jupyter_output><empty_output><jupyter_text>Converting the list to a series<jupyter_code>temperature_celsius=pd.Series(Celsius)<jupyter_output><empty_output><jupyter_text>Applying the series to the temperature column<jupyter_code>df['temperature']= temperature_celsius df['temperature'] df.head()<jupyter_output><empty_output><jupyter_text>Thus we have converted the temperature column from Fahrenheit to degrees Celsius. Similarly we now convert apparent_temperature to degrees Celsius.<jupyter_code>at_fahrenheit=df['apparent_temperature'] at_F=at_fahrenheit.tolist() at_C= map(lambda x: (float(5)/9)*(x-32),at_F) at_Celsius=list(at_C) at_celsius=pd.Series(at_Celsius) at_celsius apparent_temperature_celsius=pd.Series(at_Celsius) print(apparent_temperature_celsius) df['apparent_temperature']= apparent_temperature_celsius df['apparent_temperature'] df.head() X = df.iloc[:,1:8] # all rows, all the features and no labels y = df.iloc[:, 0] # all rows, label only #X #y df.corr() correlation = df.corr() plt.figure(figsize=(10,10)) sns.heatmap(correlation, vmax=1, 
square=True,annot=True,cmap='viridis') plt.title('Correlation between different features')<jupyter_output><empty_output><jupyter_text>Standardising the data<jupyter_code># Standardise the data to zero mean and unit variance from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X=scaler.fit_transform(X) X from sklearn.decomposition import PCA pca = PCA() pca.fit_transform(X) pca.get_covariance() explained_variance=pca.explained_variance_ratio_ explained_variance with plt.style.context('dark_background'): plt.figure(figsize=(6, 4)) plt.bar(range(7), explained_variance, alpha=0.5, align='center', label='individual explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.legend(loc='best') plt.tight_layout()<jupyter_output><empty_output><jupyter_text>**Thus we can see from the above plot that the first two components account for almost 55% of the variance, the third, fourth and fifth components cover roughly another 42% of the spread, and the remaining components carry less than 5% of the variance. Hence we can drop the last components and keep five.**<jupyter_code>pca=PCA(n_components=5) X_new=pca.fit_transform(X) X_new pca.get_covariance() explained_variance=pca.explained_variance_ratio_ explained_variance with plt.style.context('dark_background'): plt.figure(figsize=(6, 4)) plt.bar(range(5), explained_variance, alpha=0.5, align='center', label='individual explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.legend(loc='best') plt.tight_layout() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) X_train.shape # Establish model from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() # Try different numbers of n_estimators - this will take a minute or so estimators = np.arange(10, 200, 10) scores = [] for n in estimators: model.set_params(n_estimators=n) model.fit(X_train, y_train) scores.append(model.score(X_test, y_test)) print(scores) plt.title("Effect of n_estimators") plt.xlabel("n_estimator") plt.ylabel("score") plt.plot(estimators, scores) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size=0.2, random_state=1) X_train.shape # Establish model from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() # Try different numbers of n_estimators - this will take a minute or so estimators = np.arange(10, 200, 10) scores = [] for n in estimators: model.set_params(n_estimators=n) model.fit(X_train, y_train) scores.append(model.score(X_test, y_test)) print(scores) plt.title("Effect of n_estimators") plt.xlabel("n_estimator") plt.ylabel("score") plt.plot(estimators, scores)<jupyter_output><empty_output>
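<jupyter_text>A possible variant of the manual component selection above, offered only as a sketch: scikit-learn's PCA also accepts a float n_components between 0 and 1 and then keeps however many components are needed to reach that fraction of explained variance, and wrapping the scaler, the PCA and the regressor in a single Pipeline ensures the preprocessing is fitted on the training split only. The 0.95 threshold and n_estimators=100 are illustrative assumptions; df is the dataframe loaded earlier in this notebook.<jupyter_code>
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

X_raw = df.iloc[:, 1:8]   # unscaled features, as in the cells above
y = df.iloc[:, 0]

X_train, X_test, y_train, y_test = train_test_split(X_raw, y, test_size=0.2, random_state=1)

model = Pipeline([
    ('scale', StandardScaler()),
    ('pca', PCA(n_components=0.95)),   # keep enough components for ~95% of the variance
    ('rf', RandomForestRegressor(n_estimators=100, random_state=1)),
])
model.fit(X_train, y_train)

print('components kept:', model.named_steps['pca'].n_components_)
print('cumulative explained variance:', np.cumsum(model.named_steps['pca'].explained_variance_ratio_))
print('R^2 on the test split:', model.score(X_test, y_test))
<jupyter_output><empty_output>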
Source notebook: /notebook/principal-component-analysis-with-scikit-learn.ipynb (repo: rohitsabu08/spider, no license)
<jupyter_start><jupyter_text>##### quaternion to rotation matrix $ R_q = \begin{bmatrix} 1-2cc-2dd & 2bc-2ad & 2bd+2ac \\ 2bc+2ad & 1-2bb-2dd & 2cd-2ab \\ 2bd-2ac & 2cd+2ab & 1-2bb-2cc \end{bmatrix} $ [AA](AxisAngleAlternative.md) ##### quaternion to rotation matrix $\color{red}{ R_q = \overbrace{\underbrace{ \begin{bmatrix}1&0&0\\0&1&0\\0&0&1\end{bmatrix} }_{\textstyle I}}^{\textstyle 1} + \overbrace{\underbrace{ \begin{bmatrix}-\ c^2-d^2&bc&bd\\bc&-\ b^2-d^2&cd\\bd&cd&-\ b^2-c^2\end{bmatrix} }_{\textstyle ([u]_{\times})^2}}^{\textstyle 2} + \overbrace{\underbrace{ \begin{bmatrix}0&-d&c\\d&0&-b\\-c&b&0\end{bmatrix} }_{\textstyle [u]_{\times}}}^{\textstyle 2a} }$ $\color{red}{ R_q = I + 2([u]_{\times})^2 + 2a[u]_{\times} }$ 12/12 or 9/15 $ R_q = I + 2[u]_{\times}([u]_{\times} + aI) $ <jupyter_code>def matrix(): # temp tb = b * 2; tc = c * 2; td = d * 2; # ab = tb * a; ac = tc * a; ad = td * a; # cross bb = tb * b; cc = tc * c; dd = td * d; # dot bc = tb * c; bd = tb * d; cd = tc * d; # dot # matrix elements xx = 1 - cc - dd; xy = bc - ad; xz = bd + ac; # x' row yx = bc + ad; yy = 1 - bb - dd; yz = cd - ab; # y' row zx = bd - ac; zy = cd + ab; zz = 1 - bb - cc; # z' row # 12 multiplications # 12 additions/subtractions<jupyter_output><empty_output><jupyter_text>##### quaternion to rotate vector $ \vec{v'} = q\vec{v}q^{-1} = (a+\vec{u}) \ \vec{v} \ (a-\vec{u}) = \vec{v} + 2\vec{u}\times(\vec{u}\times\vec{v}) + 2a(\vec{u}\times\vec{v}) $ <jupyter_code>def rotate(): # temp tx = (c * z - d * y) * 2; ty = (d * x - b * z) * 2; tz = (b * y - c * x) * 2; # vector _x = x + (c * tz - d * ty) + a * tx; # x' _y = y + (d * tx - b * tz) + a * ty; # y' _z = z + (b * ty - c * tx) + a * tz; # z' # 18 multiplications # 12 additions/subtractions<jupyter_output><empty_output><jupyter_text>##### quaternion to rotate vector $ \vec{v'} = q\vec{v}q^{-1} = (a+\vec{u}) \ \vec{v} \ (a-\vec{u}) = \vec{v} + 2\vec{u}\times(\vec{u}\times\vec{v}+a\vec{v}) $ <jupyter_code>def rotate(): # temp tx = (c * z - d * y) + a * x; ty = (d * x - b * z) + a * y; tz = (b * y - c * x) + a * z; # vector _x = x + (c * tz - d * ty) * 2; # x' _y = y + (d * tx - b * tz) * 2; # y' _z = z + (b * ty - c * tx) * 2; # z' # 18 multiplications # 12 additions/subtractions<jupyter_output><empty_output>
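<jupyter_text>As a quick numerical sanity check of the two equivalent forms above, the following sketch (assuming a unit quaternion q = a + bi + cj + dk) builds R_q from the cross-product matrix [u]x and compares R_q v against the vectorised rotation v' = v + 2 u x (u x v + a v).<jupyter_code>
import numpy as np

rng = np.random.default_rng(0)

# random unit quaternion q = a + b*i + c*j + d*k
q = rng.normal(size=4)
a, b, c, d = q / np.linalg.norm(q)
u = np.array([b, c, d])                      # vector part of q

# R_q = I + 2*[u]x^2 + 2*a*[u]x, with [u]x the cross-product matrix of u
ux = np.array([[0.0,  -d,    c],
               [d,    0.0,  -b],
               [-c,   b,    0.0]])
R = np.eye(3) + 2 * (ux @ ux) + 2 * a * ux

# rotate a random vector with both forms and check that they agree
v = rng.normal(size=3)
t = np.cross(u, v) + a * v                   # u x v + a*v
v_rot = v + 2 * np.cross(u, t)               # v + 2*u x (u x v + a*v)

print(np.allclose(R @ v, v_rot))             # True: both forms give the same rotation
<jupyter_output><empty_output>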
Source notebook: /math/Quaternion.ipynb (repo: sultan/test, no license)